From 131da7ba1882b342231e969eda637004538048df Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 19 Jun 2019 00:57:01 +0000 Subject: [PATCH 01/73] Revert "Revert "Remove the rest of v1 manifest support"" This reverts commit f695e98cb7cbb39eb7e5bd0a268ae1cd2e594fc1. Signed-off-by: Tibor Vass --- cmd/dockerd/config.go | 2 - cmd/dockerd/daemon.go | 8 --- cmd/dockerd/daemon_unix.go | 4 -- cmd/dockerd/daemon_windows.go | 5 -- daemon/config/config.go | 6 -- daemon/daemon.go | 5 +- daemon/images/image_push.go | 1 - daemon/images/service.go | 4 -- daemon/trustkey.go | 57 ------------------ daemon/trustkey_test.go | 71 ----------------------- daemon/uuid.go | 35 +++++++++++ distribution/config.go | 4 -- distribution/push_v2.go | 27 +-------- integration-cli/docker_cli_daemon_test.go | 23 ++++---- 14 files changed, 52 insertions(+), 200 deletions(-) delete mode 100644 daemon/trustkey.go delete mode 100644 daemon/trustkey_test.go create mode 100644 daemon/uuid.go diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go index b28ac1cdc4ddf..317315414f040 100644 --- a/cmd/dockerd/config.go +++ b/cmd/dockerd/config.go @@ -12,8 +12,6 @@ import ( const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 - // defaultTrustKeyFile is the default filename for the trust key - defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index e2afe6f80f202..4a2962409ac4c 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -429,14 +429,6 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } - if conf.TrustKeyPath == "" { - daemonConfDir, err := getDaemonConfDir(conf.Root) - if err != nil { - return nil, err - } - conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) - } - if flags.Changed("graph") && 
flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go index a6685bb668769..a0f1b893b49bb 100644 --- a/cmd/dockerd/daemon_unix.go +++ b/cmd/dockerd/daemon_unix.go @@ -58,10 +58,6 @@ func setDefaultUmask() error { return nil } -func getDaemonConfDir(_ string) (string, error) { - return getDefaultDaemonConfigDir() -} - func (cli *DaemonCli) getPlatformContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts := []supervisor.DaemonOpt{ supervisor.WithOOMScore(cli.Config.OOMScoreAdjust), diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go index 46932760cdec7..bf7b9efa3f63c 100644 --- a/cmd/dockerd/daemon_windows.go +++ b/cmd/dockerd/daemon_windows.go @@ -5,7 +5,6 @@ import ( "fmt" "net" "os" - "path/filepath" "time" "github.com/docker/docker/daemon/config" @@ -24,10 +23,6 @@ func setDefaultUmask() error { return nil } -func getDaemonConfDir(root string) (string, error) { - return filepath.Join(root, `\config`), nil -} - // preNotifySystem sends a message to the host when the API is active, but before the daemon is func preNotifySystem() { // start the service now to prevent timeouts waiting for daemon to start diff --git a/daemon/config/config.go b/daemon/config/config.go index 80ecbbd9550d6..549009c42029b 100644 --- a/daemon/config/config.go +++ b/daemon/config/config.go @@ -136,12 +136,6 @@ type CommonConfig struct { SocketGroup string `json:"group,omitempty"` CorsHeaders string `json:"api-cors-header,omitempty"` - // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests - // when pushing to a registry which does not support schema 2. This field is marked as - // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the - // daemon ID will use a dedicated identifier not shared with exported signatures. 
- TrustKeyPath string `json:"deprecated-key-path,omitempty"` - // LiveRestoreEnabled determines whether we should keep containers // alive upon daemon shutdown/start LiveRestoreEnabled bool `json:"live-restore,omitempty"` diff --git a/daemon/daemon.go b/daemon/daemon.go index 6bde9c94e61ab..420cfb12b64cd 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -973,7 +973,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath) + uuid, err := loadOrCreateUUID(filepath.Join(config.Root, "engine_uuid")) if err != nil { return nil, err } @@ -1018,7 +1018,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, errors.New("Devices cgroup isn't mounted") } - d.ID = trustKey.PublicKey().KeyID() + d.ID = uuid d.repository = daemonRepo d.containers = container.NewMemoryStore() if d.containersReplica, err = container.NewViewDB(); err != nil { @@ -1049,7 +1049,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S MaxConcurrentUploads: *config.MaxConcurrentUploads, ReferenceStore: rs, RegistryService: registryService, - TrustKey: trustKey, }) go d.execCommandGC() diff --git a/daemon/images/image_push.go b/daemon/images/image_push.go index 4c7be8d2e9749..c397b1cd5218e 100644 --- a/daemon/images/image_push.go +++ b/daemon/images/image_push.go @@ -54,7 +54,6 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea }, ConfigMediaType: schema2.MediaTypeImageConfig, LayerStores: distribution.NewLayerProvidersFromStores(i.layerStores), - TrustKey: i.trustKey, UploadManager: i.uploadManager, } diff --git a/daemon/images/service.go b/daemon/images/service.go index e8df5cb649858..9034e5f37ced8 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -14,7 +14,6 @@ import ( "github.com/docker/docker/layer" dockerreference "github.com/docker/docker/reference" 
"github.com/docker/docker/registry" - "github.com/docker/libtrust" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -40,7 +39,6 @@ type ImageServiceConfig struct { MaxConcurrentUploads int ReferenceStore dockerreference.Store RegistryService registry.Service - TrustKey libtrust.PrivateKey } // NewImageService returns a new ImageService from a configuration @@ -56,7 +54,6 @@ func NewImageService(config ImageServiceConfig) *ImageService { layerStores: config.LayerStores, referenceStore: config.ReferenceStore, registryService: config.RegistryService, - trustKey: config.TrustKey, uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), } } @@ -72,7 +69,6 @@ type ImageService struct { pruneRunning int32 referenceStore dockerreference.Store registryService registry.Service - trustKey libtrust.PrivateKey uploadManager *xfer.LayerUploadManager } diff --git a/daemon/trustkey.go b/daemon/trustkey.go deleted file mode 100644 index 4d72c932f1485..0000000000000 --- a/daemon/trustkey.go +++ /dev/null @@ -1,57 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "encoding/json" - "encoding/pem" - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/libtrust" -) - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need -// a refactor or this function to be moved into libtrust -func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0755, "") - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) 
- } - encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) - if err != nil { - return nil, fmt.Errorf("Error serializing key: %s", err) - } - if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} - -func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { - if ext == ".json" || ext == ".jwk" { - encoded, err = json.Marshal(key) - if err != nil { - return nil, fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - pemBlock, err := key.PEMBlock() - if err != nil { - return nil, fmt.Errorf("unable to encode private key PEM: %s", err) - } - encoded = pem.EncodeToMemory(pemBlock) - } - return -} diff --git a/daemon/trustkey_test.go b/daemon/trustkey_test.go deleted file mode 100644 index e49e76aa3eeec..0000000000000 --- a/daemon/trustkey_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "gotest.tools/assert" - is "gotest.tools/assert/cmp" - "gotest.tools/fs" -) - -// LoadOrCreateTrustKey -func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - assert.NilError(t, err) - defer os.RemoveAll(tmpKeyFolderPath) - - tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") - assert.NilError(t, err) - - _, err = loadOrCreateTrustKey(tmpKeyFile.Name()) - assert.Check(t, is.ErrorContains(err, "Error loading key file")) -} - -func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) { - tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test") - defer tmpKeyFolderPath.Remove() - - // Without the need to create the folder hierarchy - tmpKeyFile := 
tmpKeyFolderPath.Join("keyfile") - - key, err := loadOrCreateTrustKey(tmpKeyFile) - assert.NilError(t, err) - assert.Check(t, key != nil) - - _, err = os.Stat(tmpKeyFile) - assert.NilError(t, err, "key file doesn't exist") -} - -func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) { - tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test") - defer tmpKeyFolderPath.Remove() - tmpKeyFile := tmpKeyFolderPath.Join("folder/hierarchy/keyfile") - - key, err := loadOrCreateTrustKey(tmpKeyFile) - assert.NilError(t, err) - assert.Check(t, key != nil) - - _, err = os.Stat(tmpKeyFile) - assert.NilError(t, err, "key file doesn't exist") -} - -func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) { - defer os.Remove("keyfile") - key, err := loadOrCreateTrustKey("keyfile") - assert.NilError(t, err) - assert.Check(t, key != nil) - - _, err = os.Stat("keyfile") - assert.NilError(t, err, "key file doesn't exist") -} - -func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { - tmpKeyFile := filepath.Join("testdata", "keyfile") - key, err := loadOrCreateTrustKey(tmpKeyFile) - assert.NilError(t, err) - expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY" - assert.Check(t, is.Contains(key.String(), expected)) -} diff --git a/daemon/uuid.go b/daemon/uuid.go new file mode 100644 index 0000000000000..9640866f2a25a --- /dev/null +++ b/daemon/uuid.go @@ -0,0 +1,35 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" + "github.com/google/uuid" +) + +func loadOrCreateUUID(path string) (string, error) { + err := os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + return "", err + } + var id string + idb, err := ioutil.ReadFile(path) + if os.IsNotExist(err) { + id = uuid.New().String() + if err := ioutils.AtomicWriteFile(path, []byte(id), os.FileMode(0600)); err != nil { + return "", fmt.Errorf("Error saving uuid file: %s", err) 
+ } + } else if err != nil { + return "", fmt.Errorf("Error loading uuid file %s: %s", path, err) + } else { + idp, err := uuid.Parse(string(idb)) + if err != nil { + return "", fmt.Errorf("Error parsing uuid in file %s: %s", path, err) + } + id = idp.String() + } + return id, nil +} diff --git a/distribution/config.go b/distribution/config.go index 438051c296e27..e9631d1b8c0e7 100644 --- a/distribution/config.go +++ b/distribution/config.go @@ -18,7 +18,6 @@ import ( "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "github.com/docker/libtrust" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -73,9 +72,6 @@ type ImagePushConfig struct { ConfigMediaType string // LayerStores (indexed by operating system) manages layers. LayerStores map[string]PushLayerProvider - // TrustKey is the private key for legacy signatures. This is typically - // an ephemeral key, since these signatures are no longer verified. - TrustKey libtrust.PrivateKey // UploadManager dispatches uploads. 
UploadManager *xfer.LayerUploadManager } diff --git a/distribution/push_v2.go b/distribution/push_v2.go index e15384eba7e31..8bc0edd4c165e 100644 --- a/distribution/push_v2.go +++ b/distribution/push_v2.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "runtime" "sort" "strings" "sync" @@ -181,30 +180,8 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { - logrus.Warnf("failed to upload schema2 manifest: %v", err) - return err - } - - logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) - - msg := schema1DeprecationMessage(ref) - logrus.Warn(msg) - progress.Message(p.config.ProgressOutput, "", msg) - - manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) - if err != nil { - return err - } - builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) - manifest, err = manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - return err - } + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err } var canonicalManifest []byte diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index fd3384e39d22b..9d4938993d9e1 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -18,6 +18,7 @@ import ( "path" "path/filepath" "regexp" + "runtime" "strconv" "strings" "sync" @@ -35,7 +36,6 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/go-units" "github.com/docker/libnetwork/iptables" - "github.com/docker/libtrust" "github.com/go-check/check" "github.com/kr/pty" "golang.org/x/sys/unix" @@ -551,20 
+551,23 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { } } -func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { - // TODO: skip or update for Windows daemon - os.Remove("/etc/docker/key.json") +func (s *DockerDaemonSuite) TestDaemonUUIDGeneration(c *check.C) { + dir := "/var/lib/docker" + if runtime.GOOS == "windows" { + dir = filepath.Join(os.Getenv("programdata"), "docker") + } + file := filepath.Join(dir, "engine_uuid") + os.Remove(file) s.d.Start(c) s.d.Stop(c) - k, err := libtrust.LoadKeyFile("/etc/docker/key.json") + fi, err := os.Stat(file) if err != nil { - c.Fatalf("Error opening key file") + c.Fatalf("Error opening uuid file") } - kid := k.KeyID() - // Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) - if len(kid) != 59 { - c.Fatalf("Bad key ID: %s", kid) + // Test for uuid length + if fi.Size() != 36 { + c.Fatalf("Bad UUID size %d", fi.Size()) } } From 4a7720d98f817b8a1eb833eaea3f19724a86f41b Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 19 Jun 2019 00:57:29 +0000 Subject: [PATCH 02/73] Revert "use gotest.tools assertions in docker_cli_push_test.go" This reverts commit 0811297608499e8be473473b80eaac9d2ced9dd8. 
Signed-off-by: Tibor Vass --- integration-cli/docker_cli_push_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index 60d5811ff9a40..ebef8493efcec 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -252,31 +252,31 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c dockerCmd(c, "tag", "busybox", sourceRepoName) // push the image to the registry out1, _, err := dockerCmdWithError("push", sourceRepoName) - assert.NilError(c, err, fmt.Sprintf("pushing the image to the private registry has failed: %s", out1)) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) // ensure that none of the layers were mounted from another repository during push - assert.Assert(c, !strings.Contains(out1, "Mounted from")) + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) digest1 := reference.DigestRegexp.FindString(out1) - assert.Assert(c, len(digest1) > 0, "no digest found for pushed manifest") + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) // retag the image to upload the same layers to another repo in the same registry dockerCmd(c, "tag", "busybox", destRepoName) // push the image to the registry out2, _, err := dockerCmdWithError("push", destRepoName) - assert.NilError(c, err, fmt.Sprintf("pushing the image to the private registry has failed: %s", out2)) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen - assert.Assert(c, !strings.Contains(out2, "Mounted from")) + c.Assert(strings.Contains(out2, "Mounted from"), 
check.Equals, false) digest2 := reference.DigestRegexp.FindString(out2) - assert.Assert(c, len(digest2) > 0, "no digest found for pushed manifest") - assert.Assert(c, digest1 != digest2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Not(check.Equals), digest2) // ensure that we can pull and run the second pushed repository dockerCmd(c, "rmi", destRepoName) dockerCmd(c, "pull", destRepoName) out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") - assert.Assert(c, out3 == "hello world") + c.Assert(out3, check.Equals, "hello world") } func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { From 79115e60584ecc6f1e55c5a4b06dd570c565e514 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 19 Jun 2019 00:57:30 +0000 Subject: [PATCH 03/73] Revert "Revert "Remove Schema1 integration test suite"" This reverts commit f23a51a8603a92b124e5037106958baf8c70c5e1. Signed-off-by: Tibor Vass --- Dockerfile | 12 -- integration-cli/check_test.go | 33 ---- integration-cli/docker_cli_by_digest_test.go | 143 ------------------ integration-cli/docker_cli_daemon_test.go | 74 --------- integration-cli/docker_cli_pull_local_test.go | 24 --- integration-cli/docker_cli_push_test.go | 57 ------- integration/system/uuid_test.go | 23 +++ 7 files changed, 23 insertions(+), 343 deletions(-) create mode 100644 integration/system/uuid_test.go diff --git a/Dockerfile b/Dockerfile index 45aa84559010b..12fba7578b515 100644 --- a/Dockerfile +++ b/Dockerfile @@ -51,11 +51,6 @@ RUN apt-get update && apt-get install -y \ && make PREFIX=/build/ install-criu FROM base AS registry -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. 
-ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN set -x \ && export GOPATH="$(mktemp -d)" \ @@ -63,13 +58,6 @@ RUN set -x \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ - && case $(dpkg --print-architecture) in \ - amd64|ppc64*|s390x) \ - (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \ - GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ - go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ - ;; \ - esac \ && rm -rf "$GOPATH" diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go index 2a0b1a25d53bd..c344b63bd3c94 100644 --- a/integration-cli/check_test.go +++ b/integration-cli/check_test.go @@ -141,39 +141,6 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) { s.ds.TearDownTest(c) } -func init() { - check.Suite(&DockerSchema1RegistrySuite{ - ds: &DockerSuite{}, - }) -} - -type DockerSchema1RegistrySuite struct { - ds *DockerSuite - reg *registry.V2 - d *daemon.Daemon -} - -func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { - s.d.DumpStackAndQuit() -} - -func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon) - s.reg = registry.NewV2(c, registry.Schema1) - s.reg.WaitReady(c) - s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) -} - -func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { - if s.reg != nil { - s.reg.Close() - } - if s.d != nil { - s.d.Stop(c) - } - s.ds.TearDownTest(c) -} - func init() { check.Suite(&DockerRegistryAuthHtpasswdSuite{ ds: 
&DockerSuite{}, diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go index c884d3175112c..dd5549c2bd8ca 100644 --- a/integration-cli/docker_cli_by_digest_test.go +++ b/integration-cli/docker_cli_by_digest_test.go @@ -3,12 +3,9 @@ package main import ( "encoding/json" "fmt" - "os" - "path/filepath" "regexp" "strings" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/checker" @@ -80,10 +77,6 @@ func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { testPullByTagDisplaysDigest(c) } -func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { - testPullByTagDisplaysDigest(c) -} - func testPullByDigest(c *check.C) { testRequires(c, DaemonIsLinux) pushDigest, err := setupImage(c) @@ -106,10 +99,6 @@ func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { testPullByDigest(c) } -func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { - testPullByDigest(c) -} - func testPullByDigestNoFallback(c *check.C) { testRequires(c, DaemonIsLinux) // pull from the registry using the @ reference @@ -123,10 +112,6 @@ func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { testPullByDigestNoFallback(c) } -func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { - testPullByDigestNoFallback(c) -} - func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { pushDigest, err := setupImage(c) assert.NilError(c, err, "error setting up image") @@ -561,131 +546,3 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) assert.Assert(c, is.Contains(out, expectedErrorMsg)) } - -// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when -// we have modified a manifest blob and its digest cannot be 
verified. -// This is the schema1 version of the test. -func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // Load the target manifest blob. - manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) - - var imgManifest schema1.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) - - // Change a layer in the manifest. - imgManifest.FSLayers[0] = schema1.FSLayer{ - BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), - } - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.TempMoveBlobData(c, manifestDigest) - defer undo() - - alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") - c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) - - s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the manifest digest. - - // Pull from the registry using the @ reference. - imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0) - - expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) - c.Assert(out, checker.Contains, expectedErrorMsg) -} - -// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when -// we have modified a layer blob and its digest cannot be verified. -// This is the schema2 version of the test. 
-func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil) - - // Load the target manifest blob. - manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) - - var imgManifest schema2.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil) - - // Next, get the digest of one of the layers from the manifest. - targetLayerDigest := imgManifest.Layers[0].Digest - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.TempMoveBlobData(c, targetLayerDigest) - defer undo() - - // Now make a fake data blob in this directory. - s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the target layer digest. - - // Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil { - c.Fatalf("error clearing distribution cache: %v", err) - } - - // Pull from the registry using the @ reference. - imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) - - expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) - c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) -} - -// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when -// we have modified a layer blob and its digest cannot be verified. -// This is the schema1 version of the test. 
-func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil) - - // Load the target manifest blob. - manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) - - var imgManifest schema1.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil) - - // Next, get the digest of one of the layers from the manifest. - targetLayerDigest := imgManifest.FSLayers[0].BlobSum - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.TempMoveBlobData(c, targetLayerDigest) - defer undo() - - // Now make a fake data blob in this directory. - s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the target layer digest. - - // Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil { - c.Fatalf("error clearing distribution cache: %v", err) - } - - // Pull from the registry using the @ reference. 
- imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) - - expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) - c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) -} diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 9d4938993d9e1..c9e88fa58ae4d 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -18,7 +18,6 @@ import ( "path" "path/filepath" "regexp" - "runtime" "strconv" "strings" "sync" @@ -551,26 +550,6 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { } } -func (s *DockerDaemonSuite) TestDaemonUUIDGeneration(c *check.C) { - dir := "/var/lib/docker" - if runtime.GOOS == "windows" { - dir = filepath.Join(os.Getenv("programdata"), "docker") - } - file := filepath.Join(dir, "engine_uuid") - os.Remove(file) - s.d.Start(c) - s.d.Stop(c) - - fi, err := os.Stat(file) - if err != nil { - c.Fatalf("Error opening uuid file") - } - // Test for uuid length - if fi.Size() != 36 { - c.Fatalf("Bad UUID size %d", fi.Size()) - } -} - // GH#11320 - verify that the daemon exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required @@ -1195,59 +1174,6 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { } } -func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { - type Config struct { - Crv string `json:"crv"` - D string `json:"d"` - Kid string `json:"kid"` - Kty string `json:"kty"` - X string `json:"x"` - Y string `json:"y"` - } - - os.Remove("/etc/docker/key.json") - 
s.d.Start(c) - s.d.Stop(c) - - config := &Config{} - bytes, err := ioutil.ReadFile("/etc/docker/key.json") - if err != nil { - c.Fatalf("Error reading key.json file: %s", err) - } - - // byte[] to Data-Struct - if err := json.Unmarshal(bytes, &config); err != nil { - c.Fatalf("Error Unmarshal: %s", err) - } - - //replace config.Kid with the fake value - config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" - - // NEW Data-Struct to byte[] - newBytes, err := json.Marshal(&config) - if err != nil { - c.Fatalf("Error Marshal: %s", err) - } - - // write back - if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { - c.Fatalf("Error ioutil.WriteFile: %s", err) - } - - defer os.Remove("/etc/docker/key.json") - - if err := s.d.StartWithError(); err == nil { - c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) - } - - content, err := s.d.ReadLogFile() - c.Assert(err, checker.IsNil) - - if !strings.Contains(string(content), "Public Key ID does not match") { - c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) - } -} - func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { s.d.StartWithBusybox(c) diff --git a/integration-cli/docker_cli_pull_local_test.go b/integration-cli/docker_cli_pull_local_test.go index b6b774ea446d7..0ddf7770d1652 100644 --- a/integration-cli/docker_cli_pull_local_test.go +++ b/integration-cli/docker_cli_pull_local_test.go @@ -56,10 +56,6 @@ func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { testPullImageWithAliases(c) } -func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { - testPullImageWithAliases(c) -} - // testConcurrentPullWholeRepo pulls the same repo concurrently. 
func testConcurrentPullWholeRepo(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) @@ -112,10 +108,6 @@ func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { testConcurrentPullWholeRepo(c) } -func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { - testConcurrentPullWholeRepo(c) -} - // testConcurrentFailingPull tries a concurrent pull that doesn't succeed. func testConcurrentFailingPull(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) @@ -143,10 +135,6 @@ func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { testConcurrentFailingPull(c) } -func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { - testConcurrentFailingPull(c) -} - // testConcurrentPullMultipleTags pulls multiple tags from the same repo // concurrently. func testConcurrentPullMultipleTags(c *check.C) { @@ -199,10 +187,6 @@ func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { testConcurrentPullMultipleTags(c) } -func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { - testConcurrentPullMultipleTags(c) -} - // testPullIDStability verifies that pushing an image and pulling it back // preserves the image ID. 
func testPullIDStability(c *check.C) { @@ -260,10 +244,6 @@ func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { testPullIDStability(c) } -func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { - testPullIDStability(c) -} - // #21213 func testPullNoLayers(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) @@ -280,10 +260,6 @@ func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { testPullNoLayers(c) } -func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { - testPullNoLayers(c) -} - func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { testRequires(c, NotArm) pushDigest, err := setupImage(c) diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index ebef8493efcec..40fb2b8417cc0 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -30,10 +30,6 @@ func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { testPushBusyboxImage(c) } -func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { - testPushBusyboxImage(c) -} - // pushing an image without a prefix should throw an error func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { out, _, err := dockerCmdWithError("push", "busybox") @@ -53,10 +49,6 @@ func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { testPushUntagged(c) } -func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { - testPushUntagged(c) -} - func testPushBadTag(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) expected := "does not exist" @@ -70,10 +62,6 @@ func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { testPushBadTag(c) } -func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { - testPushBadTag(c) -} - func testPushMultipleTags(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", 
privateRegistryURL) @@ -115,10 +103,6 @@ func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { testPushMultipleTags(c) } -func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { - testPushMultipleTags(c) -} - func testPushEmptyLayer(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) emptyTarball, err := ioutil.TempFile("", "empty_tarball") @@ -146,10 +130,6 @@ func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { testPushEmptyLayer(c) } -func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { - testPushEmptyLayer(c) -} - // testConcurrentPush pushes multiple tags to the same repo // concurrently. func testConcurrentPush(c *check.C) { @@ -200,10 +180,6 @@ func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { testConcurrentPush(c) } -func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { - testConcurrentPush(c) -} - func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry @@ -246,39 +222,6 @@ func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { assert.Equal(c, out4, "hello world") } -func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { - sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - // tag the image to upload it to the private registry - dockerCmd(c, "tag", "busybox", sourceRepoName) - // push the image to the registry - out1, _, err := dockerCmdWithError("push", sourceRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) - // ensure that none of the layers were mounted from another repository during push - c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) - - digest1 := reference.DigestRegexp.FindString(out1) - c.Assert(len(digest1), 
checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - - destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) - // retag the image to upload the same layers to another repo in the same registry - dockerCmd(c, "tag", "busybox", destRepoName) - // push the image to the registry - out2, _, err := dockerCmdWithError("push", destRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) - // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen - c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) - - digest2 := reference.DigestRegexp.FindString(out2) - c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - c.Assert(digest1, check.Not(check.Equals), digest2) - - // ensure that we can pull and run the second pushed repository - dockerCmd(c, "rmi", destRepoName) - dockerCmd(c, "pull", destRepoName) - out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") - c.Assert(out3, check.Equals, "hello world") -} - func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) dockerCmd(c, "tag", "busybox", repoName) diff --git a/integration/system/uuid_test.go b/integration/system/uuid_test.go new file mode 100644 index 0000000000000..d6df8fd4c8b1c --- /dev/null +++ b/integration/system/uuid_test.go @@ -0,0 +1,23 @@ +package system + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types/versions" + "github.com/google/uuid" + "gotest.tools/assert" + "gotest.tools/skip" +) + +func TestUUIDGeneration(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "ID format changed") + defer setupTest(t)() + + c := testEnv.APIClient() + info, err := c.Info(context.Background()) + assert.NilError(t, err) + + _, err = 
uuid.Parse(info.ID) + assert.NilError(t, err, info.ID) +} From aa439dfb529a2a85b949fb703a7ec8a07b07330a Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Thu, 20 Jun 2019 01:20:20 +0000 Subject: [PATCH 04/73] distribution: remove v2 schema1 push Manifest v2 schema1 was deprecated in 4866f5139a1 and this commit removes the push code for v2 schema1. Signed-off-by: Tibor Vass --- cmd/dockerd/config.go | 2 ++ cmd/dockerd/daemon.go | 8 ++++++ cmd/dockerd/daemon_unix.go | 4 +++ cmd/dockerd/daemon_windows.go | 5 ++++ daemon/config/config.go | 6 +++++ daemon/daemon.go | 4 +-- daemon/id.go | 43 +++++++++++++++++++++++++++++++++ daemon/uuid.go | 35 --------------------------- distribution/push_v2.go | 5 ++-- integration/system/uuid_test.go | 6 ++--- 10 files changed, 74 insertions(+), 44 deletions(-) create mode 100644 daemon/id.go delete mode 100644 daemon/uuid.go diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go index 317315414f040..b28ac1cdc4ddf 100644 --- a/cmd/dockerd/config.go +++ b/cmd/dockerd/config.go @@ -12,6 +12,8 @@ import ( const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 + // defaultTrustKeyFile is the default filename for the trust key + defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 4a2962409ac4c..e2b3039095e2e 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -429,6 +429,14 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } + if conf.DeprecatedTrustKeyPath == "" { + daemonConfDir, err := getDaemonConfDir(conf.Root) + if err != nil { + return nil, err + } + conf.DeprecatedTrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) + } + if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and 
"--data-root" option`) } diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go index a0f1b893b49bb..a6685bb668769 100644 --- a/cmd/dockerd/daemon_unix.go +++ b/cmd/dockerd/daemon_unix.go @@ -58,6 +58,10 @@ func setDefaultUmask() error { return nil } +func getDaemonConfDir(_ string) (string, error) { + return getDefaultDaemonConfigDir() +} + func (cli *DaemonCli) getPlatformContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts := []supervisor.DaemonOpt{ supervisor.WithOOMScore(cli.Config.OOMScoreAdjust), diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go index bf7b9efa3f63c..46932760cdec7 100644 --- a/cmd/dockerd/daemon_windows.go +++ b/cmd/dockerd/daemon_windows.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "os" + "path/filepath" "time" "github.com/docker/docker/daemon/config" @@ -23,6 +24,10 @@ func setDefaultUmask() error { return nil } +func getDaemonConfDir(root string) (string, error) { + return filepath.Join(root, `\config`), nil +} + // preNotifySystem sends a message to the host when the API is active, but before the daemon is func preNotifySystem() { // start the service now to prevent timeouts waiting for daemon to start diff --git a/daemon/config/config.go b/daemon/config/config.go index 549009c42029b..b8ea161198aff 100644 --- a/daemon/config/config.go +++ b/daemon/config/config.go @@ -136,6 +136,12 @@ type CommonConfig struct { SocketGroup string `json:"group,omitempty"` CorsHeaders string `json:"api-cors-header,omitempty"` + // DeprecatedTrustKeyPath was used to generate the daemon ID and for signing schema 1 manifests + // when pushing to a registry which does not support schema 2. This field is marked as + // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the + // daemon ID will use a dedicated identifier not shared with exported signatures. 
+ DeprecatedTrustKeyPath string `json:"deprecated-key-path,omitempty"` + // LiveRestoreEnabled determines whether we should keep containers // alive upon daemon shutdown/start LiveRestoreEnabled bool `json:"live-restore,omitempty"` diff --git a/daemon/daemon.go b/daemon/daemon.go index 420cfb12b64cd..0662e3750ba27 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -973,7 +973,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - uuid, err := loadOrCreateUUID(filepath.Join(config.Root, "engine_uuid")) + id, err := loadOrCreateID(filepath.Join(config.Root, "engine_id"), config.DeprecatedTrustKeyPath) if err != nil { return nil, err } @@ -1018,7 +1018,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, errors.New("Devices cgroup isn't mounted") } - d.ID = uuid + d.ID = id d.repository = daemonRepo d.containers = container.NewMemoryStore() if d.containersReplica, err = container.NewViewDB(); err != nil { diff --git a/daemon/id.go b/daemon/id.go new file mode 100644 index 0000000000000..eaeeca384ebb3 --- /dev/null +++ b/daemon/id.go @@ -0,0 +1,43 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/libtrust" + "github.com/google/uuid" + "github.com/sirupsen/logrus" +) + +func loadOrCreateID(path, deprecatedTrustKeyPath string) (string, error) { + err := os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + return "", err + } + var id string + idb, err := ioutil.ReadFile(path) + if os.IsNotExist(err) { + // first try to fallback to trust-key based ID + trustKey, err := libtrust.LoadKeyFile(deprecatedTrustKeyPath) + if err == nil { + id = trustKey.PublicKey().KeyID() + } else { + if err != libtrust.ErrKeyFileDoesNotExist { + logrus.Warnf("Error loading deprecated key file %s (%v). 
Falling back to generating new ID instead", deprecatedTrustKeyPath, err) + } + // then fallback to generating UUID + id = uuid.New().String() + } + if err := ioutils.AtomicWriteFile(path, []byte(id), os.FileMode(0600)); err != nil { + return "", fmt.Errorf("Error saving ID file: %s", err) + } + } else if err != nil { + return "", fmt.Errorf("Error loading ID file %s: %s", path, err) + } else { + id = string(idb) + } + return id, nil +} diff --git a/daemon/uuid.go b/daemon/uuid.go deleted file mode 100644 index 9640866f2a25a..0000000000000 --- a/daemon/uuid.go +++ /dev/null @@ -1,35 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/ioutils" - "github.com/google/uuid" -) - -func loadOrCreateUUID(path string) (string, error) { - err := os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - return "", err - } - var id string - idb, err := ioutil.ReadFile(path) - if os.IsNotExist(err) { - id = uuid.New().String() - if err := ioutils.AtomicWriteFile(path, []byte(id), os.FileMode(0600)); err != nil { - return "", fmt.Errorf("Error saving uuid file: %s", err) - } - } else if err != nil { - return "", fmt.Errorf("Error loading uuid file %s: %s", path, err) - } else { - idp, err := uuid.Parse(string(idb)) - if err != nil { - return "", fmt.Errorf("Error parsing uuid in file %s: %s", path, err) - } - id = idp.String() - } - return id, nil -} diff --git a/distribution/push_v2.go b/distribution/push_v2.go index 8bc0edd4c165e..8c2a9dcb96e02 100644 --- a/distribution/push_v2.go +++ b/distribution/push_v2.go @@ -10,7 +10,6 @@ import ( "sync" "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" @@ -187,13 +186,13 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id 
var canonicalManifest []byte switch v := manifest.(type) { - case *schema1.SignedManifest: - canonicalManifest = v.Canonical case *schema2.DeserializedManifest: _, canonicalManifest, err = v.Payload() if err != nil { return err } + default: + return fmt.Errorf("unknown manifest type %T", v) } manifestDigest := digest.FromBytes(canonicalManifest) diff --git a/integration/system/uuid_test.go b/integration/system/uuid_test.go index d6df8fd4c8b1c..67ae677896279 100644 --- a/integration/system/uuid_test.go +++ b/integration/system/uuid_test.go @@ -5,19 +5,17 @@ import ( "testing" "github.com/docker/docker/api/types/versions" - "github.com/google/uuid" "gotest.tools/assert" "gotest.tools/skip" ) func TestUUIDGeneration(t *testing.T) { - skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "ID format changed") + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "ID format changed") defer setupTest(t)() c := testEnv.APIClient() info, err := c.Info(context.Background()) assert.NilError(t, err) - _, err = uuid.Parse(info.ID) - assert.NilError(t, err, info.ID) + assert.Assert(t, info.ID != "") } From 19b322008d0771583bc40aaafcb50c489bc4af0f Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Thu, 20 Jun 2019 20:54:01 +0000 Subject: [PATCH 05/73] daemon: do not mkdir trust directory Signed-off-by: Tibor Vass --- daemon/daemon.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 0662e3750ba27..57a28ddb7e786 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -978,12 +978,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - trustDir := filepath.Join(config.Root, "trust") - - if err := system.MkdirAll(trustDir, 0700, ""); err != nil { - return nil, err - } - // We have a single tag/reference store for the daemon globally. However, it's // stored under the graphdriver. 
On host platforms which only support a single // container OS, but multiple selectable graphdrivers, this means depending on which From d1c3fe14eab5f18931feab31d7adc820cecb8d52 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 16 Oct 2018 16:32:46 -0700 Subject: [PATCH 06/73] Use containerd client to pull images Signed-off-by: Derek McGowan --- daemon/daemon.go | 3 +- daemon/images/image_pull.go | 60 ++++++++++++++++++------------------- daemon/images/service.go | 20 ++++++++----- 3 files changed, 44 insertions(+), 39 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 57a28ddb7e786..fb0dcc012858c 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -930,7 +930,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S for operatingSystem, gd := range d.graphDrivers { layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{ - Root: config.Root, + Root: config.Root, MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), GraphDriver: gd, GraphDriverOptions: config.GraphOptions, @@ -1034,6 +1034,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // used above to run migration. They could be initialized in ImageService // if migration is called from daemon/images. layerStore might move as well. 
d.imageService = images.NewImageService(images.ImageServiceConfig{ + Client: d.containerdCli, ContainerStore: d.containers, DistributionMetadataStore: distributionMetadataStore, EventsService: d.EventsService, diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index f2b689486193c..10218c70aa3c0 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -6,13 +6,12 @@ import ( "strings" "time" + "github.com/containerd/containerd" dist "github.com/docker/distribution" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/distribution" - progressutils "github.com/docker/docker/distribution/utils" "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/progress" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" @@ -54,36 +53,35 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { // Include a buffer so that slow client connections don't affect // transfer performance. 
- progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - imagePullConfig := &distribution.ImagePullConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: i.registryService, - ImageEventLogger: i.LogImageEvent, - MetadataStore: i.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), - ReferenceStore: i.referenceStore, - }, - DownloadManager: i.downloadManager, - Schema2Types: distribution.ImageTypes, - Platform: platform, - } + //progressChan := make(chan progress.Progress, 100) + + //writesDone := make(chan struct{}) + + //ctx, cancelFunc := context.WithCancel(ctx) + + // TODO: Lease + + opts := []containerd.RemoteOpt{} + // TODO: Custom resolver + // - Auth config + // - Custom headers + // TODO: Platforms using `platform` + // TODO: progress tracking + // TODO: unpack tracking, use download manager for now? + + // TODO: keep image + _, err := i.client.Pull(ctx, ref.String(), opts...) + + // TODO: Unpack into layer store + // TODO: only unpack image types (does containerd already do this?) 
+ + //go func() { + // progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + // close(writesDone) + //}() - err := distribution.Pull(ctx, ref, imagePullConfig) - close(progressChan) - <-writesDone + //close(progressChan) + //<-writesDone return err } diff --git a/daemon/images/service.go b/daemon/images/service.go index 9034e5f37ced8..4ffaceb9b8ed2 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -5,6 +5,7 @@ import ( "os" "runtime" + "github.com/containerd/containerd" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" "github.com/docker/docker/distribution" @@ -30,6 +31,7 @@ type containerStore interface { // ImageServiceConfig is the configuration used to create a new ImageService type ImageServiceConfig struct { + Client *containerd.Client ContainerStore containerStore DistributionMetadataStore metadata.Store EventsService *daemonevents.Events @@ -46,6 +48,7 @@ func NewImageService(config ImageServiceConfig) *ImageService { logrus.Debugf("Max Concurrent Downloads: %d", config.MaxConcurrentDownloads) logrus.Debugf("Max Concurrent Uploads: %d", config.MaxConcurrentUploads) return &ImageService{ + client: config.Client, containers: config.ContainerStore, distributionMetadataStore: config.DistributionMetadataStore, downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), @@ -60,15 +63,18 @@ func NewImageService(config ImageServiceConfig) *ImageService { // ImageService provides a backend for image management type ImageService struct { - containers containerStore + client *containerd.Client + containers containerStore + eventsService *daemonevents.Events + layerStores map[string]layer.Store // By operating system + pruneRunning int32 + + // To be replaced by containerd client + registryService registry.Service + referenceStore dockerreference.Store + imageStore image.Store distributionMetadataStore metadata.Store downloadManager 
*xfer.LayerDownloadManager - eventsService *daemonevents.Events - imageStore image.Store - layerStores map[string]layer.Store // By operating system - pruneRunning int32 - referenceStore dockerreference.Store - registryService registry.Service uploadManager *xfer.LayerUploadManager } From f1d6d38c7d38e62cf21a5224e35bfe0aac0a6b6c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 17 Oct 2018 12:52:10 -0700 Subject: [PATCH 07/73] Use containerd to list images Signed-off-by: Derek McGowan --- api/server/router/distribution/backend.go | 10 +- .../distribution/distribution_routes.go | 127 ++---- daemon/cluster/executor/backend.go | 4 +- daemon/cluster/services.go | 17 +- daemon/create.go | 4 +- daemon/images/image.go | 86 +++- daemon/images/image_events.go | 4 +- daemon/images/image_inspect.go | 2 +- daemon/images/image_prune.go | 2 +- daemon/images/image_pull.go | 41 +- daemon/images/images.go | 377 +++++++++++------- image/image.go | 31 +- 12 files changed, 375 insertions(+), 330 deletions(-) diff --git a/api/server/router/distribution/backend.go b/api/server/router/distribution/backend.go index 5b881f036b499..7045c644a88c8 100644 --- a/api/server/router/distribution/backend.go +++ b/api/server/router/distribution/backend.go @@ -1,15 +1,7 @@ package distribution // import "github.com/docker/docker/api/server/router/distribution" -import ( - "context" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - // Backend is all the methods that need to be implemented // to provide image specific functionality. 
type Backend interface { - GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) + // TODO: containerd content store or manifest returned from Named and AuthConfig } diff --git a/api/server/router/distribution/distribution_routes.go b/api/server/router/distribution/distribution_routes.go index d285728382772..bfdc988b447c4 100644 --- a/api/server/router/distribution/distribution_routes.go +++ b/api/server/router/distribution/distribution_routes.go @@ -7,15 +7,11 @@ import ( "net/http" "strings" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" - "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -48,7 +44,8 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res if err != nil { return errdefs.InvalidParameter(err) } - namedRef, ok := ref.(reference.Named) + //namedRef, ok := ref.(reference.Named) + _, ok := ref.(reference.Named) if !ok { if _, ok := ref.(reference.Digested); ok { // full image ID @@ -57,94 +54,38 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image)) } - distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config) - if err != nil { - return err - } - blobsrvc := distrepo.Blobs(ctx) - - if canonicalRef, ok := namedRef.(reference.Canonical); !ok { - namedRef = reference.TagNameOnly(namedRef) - - taggedRef, ok := namedRef.(reference.NamedTagged) - if !ok { - return errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", image)) - } - - descriptor, err := 
distrepo.Tags(ctx).Get(ctx, taggedRef.Tag()) - if err != nil { - return err - } - distributionInspect.Descriptor = v1.Descriptor{ - MediaType: descriptor.MediaType, - Digest: descriptor.Digest, - Size: descriptor.Size, - } - } else { - // TODO(nishanttotla): Once manifests can be looked up as a blob, the - // descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest()) - // instead of having to manually fill in the fields - distributionInspect.Descriptor.Digest = canonicalRef.Digest() - } - - // we have a digest, so we can retrieve the manifest - mnfstsrvc, err := distrepo.Manifests(ctx) - if err != nil { - return err - } - mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest) - if err != nil { - switch err { - case reference.ErrReferenceInvalidFormat, - reference.ErrTagInvalidFormat, - reference.ErrDigestInvalidFormat, - reference.ErrNameContainsUppercase, - reference.ErrNameEmpty, - reference.ErrNameTooLong, - reference.ErrNameNotCanonical: - return errdefs.InvalidParameter(err) - } - return err - } - - mediaType, payload, err := mnfst.Payload() - if err != nil { - return err - } - // update MediaType because registry might return something incorrect - distributionInspect.Descriptor.MediaType = mediaType - if distributionInspect.Descriptor.Size == 0 { - distributionInspect.Descriptor.Size = int64(len(payload)) - } - - // retrieve platform information depending on the type of manifest - switch mnfstObj := mnfst.(type) { - case *manifestlist.DeserializedManifestList: - for _, m := range mnfstObj.Manifests { - distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ - Architecture: m.Platform.Architecture, - OS: m.Platform.OS, - OSVersion: m.Platform.OSVersion, - OSFeatures: m.Platform.OSFeatures, - Variant: m.Platform.Variant, - }) - } - case *schema2.DeserializedManifest: - configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) - var platform v1.Platform - if err == nil { - err := 
json.Unmarshal(configJSON, &platform) - if err == nil && (platform.OS != "" || platform.Architecture != "") { - distributionInspect.Platforms = append(distributionInspect.Platforms, platform) - } - } - case *schema1.SignedManifest: - platform := v1.Platform{ - Architecture: mnfstObj.Architecture, - OS: "linux", - } - distributionInspect.Platforms = append(distributionInspect.Platforms, platform) - } + // TODO: Just pull manifest blob, done... + + // TODO: Umm, get rid of this and just use images.Manifest + + //// retrieve platform information depending on the type of manifest + //switch mnfstObj := mnfst.(type) { + //case *manifestlist.DeserializedManifestList: + // for _, m := range mnfstObj.Manifests { + // distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ + // Architecture: m.Platform.Architecture, + // OS: m.Platform.OS, + // OSVersion: m.Platform.OSVersion, + // OSFeatures: m.Platform.OSFeatures, + // Variant: m.Platform.Variant, + // }) + // } + //case *schema2.DeserializedManifest: + // configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) + // var platform v1.Platform + // if err == nil { + // err := json.Unmarshal(configJSON, &platform) + // if err == nil && (platform.OS != "" || platform.Architecture != "") { + // distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + // } + // } + //case *schema1.SignedManifest: + // platform := v1.Platform{ + // Architecture: mnfstObj.Architecture, + // OS: "linux", + // } + // distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + //} return httputils.WriteJSON(w, http.StatusOK, distributionInspect) } diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go index c40f85716b1e7..16c888404d773 100644 --- a/daemon/cluster/executor/backend.go +++ b/daemon/cluster/executor/backend.go @@ -5,8 +5,6 @@ import ( "io" "time" - "github.com/docker/distribution" - "github.com/docker/distribution/reference" 
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -72,6 +70,6 @@ type VolumeBackend interface { // ImageBackend is used by an executor to perform image operations type ImageBackend interface { PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) + // TODO: Provide interface to do shallow pull and get digest from Named and Auth LookupImage(name string) (*types.ImageInspect, error) } diff --git a/daemon/cluster/services.go b/daemon/cluster/services.go index 933e2bcac3da0..f10e749bf3cca 100644 --- a/daemon/cluster/services.go +++ b/daemon/cluster/services.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -571,21 +572,17 @@ func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authC if _, ok := namedRef.(reference.Canonical); !ok { namedRef = reference.TagNameOnly(namedRef) - taggedRef, ok := namedRef.(reference.NamedTagged) + // TODO(containerd): use tagged ref to pull + //taggedRef, ok := namedRef.(reference.NamedTagged) + _, ok := namedRef.(reference.NamedTagged) if !ok { return "", errors.Errorf("image reference not tagged: %s", image) } - repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig) - if err != nil { - return "", err - } - dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag()) - if err != nil { - return "", err - } + // TODO: just pull the image with no blobs and get the digest + var img images.Image - namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest) + namedDigestedRef, err := reference.WithDigest(namedRef, 
img.Target.Digest) if err != nil { return "", err } diff --git a/daemon/create.go b/daemon/create.go index f9db0ca834545..bff1b12c9c1a2 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -294,8 +294,8 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) } func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { - if img != nil && img.Config != nil { - if err := merge(config, img.Config); err != nil { + if img != nil && img.V1Image.Config != nil { + if err := merge(config, img.V1Image.Config); err != nil { return err } } diff --git a/daemon/images/image.go b/daemon/images/image.go index 79cc07c4fda46..79c0e5897adbe 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -1,11 +1,18 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "encoding/json" "fmt" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // ErrImageDoesNotExist is error returned when no image can be found for a reference. 
@@ -30,35 +37,80 @@ func (i *ImageService) GetImage(refOrID string) (*image.Image, error) { if err != nil { return nil, errdefs.InvalidParameter(err) } + + var target ocispec.Descriptor + cs := i.client.ContentStore() + references := []ocispec.Descriptor{} + namedRef, ok := ref.(reference.Named) if !ok { digested, ok := ref.(reference.Digested) if !ok { return nil, ErrImageDoesNotExist{ref} } - id := image.IDFromDigest(digested.Digest()) - if img, err := i.imageStore.Get(id); err == nil { - return img, nil - } - return nil, ErrImageDoesNotExist{ref} - } - if digest, err := i.referenceStore.Get(namedRef); err == nil { - // Search the image stores to get the operating system, defaulting to host OS. - id := image.IDFromDigest(digest) - if img, err := i.imageStore.Get(id); err == nil { - return img, nil + target.Digest = digested.Digest() + + } else { + img, err := i.client.ImageService().Get(context.TODO(), namedRef.String()) + if err != nil { + if !errdefs.IsNotFound(err) { + return nil, errors.Wrapf(err, "unable to get image: %q", namedRef.String()) + } + // TODO: If not found here, get all hashes of config and search for best match + // Search based on ID + //if id, err := i.imageStore.Search(refOrID); err == nil { + // img, err := i.imageStore.Get(id) + // if err != nil { + // return nil, ErrImageDoesNotExist{ref} + // } + // return img, nil + //} + return nil, ErrImageDoesNotExist{ref} + } else { + // TODO: Choose correct platform + d, err := images.Config(context.TODO(), cs, img.Target, platforms.Default()) + if err != nil { + if errdefs.IsNotFound(err) { + return nil, ErrImageDoesNotExist{ref} + } + return nil, errors.Wrap(err, "unable to resolve image") + } + target = d + references = append(references, img.Target) } } - // Search based on ID - if id, err := i.imageStore.Search(refOrID); err == nil { - img, err := i.imageStore.Get(id) - if err != nil { + // TODO(containerd): Move the reference setting and resolution + img, err := i.getImage(context.TODO(), 
target) + if err != nil { + if errdefs.IsNotFound(err) { return nil, ErrImageDoesNotExist{ref} } - return img, nil + return nil, err + } + img.References = references + + return img, nil +} + +func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) (*image.Image, error) { + cs := i.client.ContentStore() + + // TODO(containerd): If not config, resolve + b, err := content.ReadBlob(ctx, cs, target) + if err != nil { + return nil, errors.Wrap(err, "unable to read target blob") + } + + var img ocispec.Image + if err := json.Unmarshal(b, &img); err != nil { + return nil, errors.Wrap(err, "unable to unmarshal image config") } - return nil, ErrImageDoesNotExist{ref} + // TODO(containerd): read labels from blob to get parent and Docker calculated size + return &image.Image{ + Config: target, + Image: &img, + }, nil } diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index d0b3064d7043a..df2268d597aa1 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -12,10 +12,10 @@ func (i *ImageService) LogImageEvent(imageID, refName, action string) { // LogImageEventWithAttributes generates an event related to an image with specific given attributes. func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { img, err := i.GetImage(imageID) - if err == nil && img.Config != nil { + if err == nil && img.V1Image.Config != nil { // image has not been removed yet. // it could be missing if the event is `delete`. 
- copyAttributes(attributes, img.Config.Labels) + copyAttributes(attributes, img.V1Image.Config.Labels) } if refName != "" { attributes["name"] = refName diff --git a/daemon/images/image_inspect.go b/daemon/images/image_inspect.go index 16c4c9b2dc950..fd3bf3bf1b28c 100644 --- a/daemon/images/image_inspect.go +++ b/daemon/images/image_inspect.go @@ -74,7 +74,7 @@ func (i *ImageService) LookupImage(name string) (*types.ImageInspect, error) { ContainerConfig: &img.ContainerConfig, DockerVersion: img.DockerVersion, Author: img.Author, - Config: img.Config, + Config: img.V1Image.Config, Architecture: img.Architecture, Os: img.OperatingSystem(), OsVersion: img.OSVersion, diff --git a/daemon/images/image_prune.go b/daemon/images/image_prune.go index 313494f2f48d0..3cb9f0b53c611 100644 --- a/daemon/images/image_prune.go +++ b/daemon/images/image_prune.go @@ -85,7 +85,7 @@ func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Arg if !until.IsZero() && img.Created.After(until) { continue } - if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) { + if img.V1Image.Config != nil && !matchLabels(pruneFilters, img.V1Image.Config.Labels) { continue } topImages[id] = img diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 10218c70aa3c0..2da60d980fa15 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -7,12 +7,9 @@ import ( "time" "github.com/containerd/containerd" - dist "github.com/docker/distribution" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/distribution" "github.com/docker/docker/errdefs" - "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -85,40 +82,4 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return err } -// GetRepository returns a repository from the registry. 
-func (i *ImageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { - // get repository info - repoInfo, err := i.registryService.ResolveRepository(ref) - if err != nil { - return nil, false, errdefs.InvalidParameter(err) - } - // makes sure name is not empty or `scratch` - if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { - return nil, false, errdefs.InvalidParameter(err) - } - - // get endpoints - endpoints, err := i.registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) - if err != nil { - return nil, false, err - } - - // retrieve repository - var ( - confirmedV2 bool - repository dist.Repository - lastError error - ) - - for _, endpoint := range endpoints { - if endpoint.Version == registry.APIVersion1 { - continue - } - - repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") - if lastError == nil && confirmedV2 { - break - } - } - return repository, confirmedV2, lastError -} +// TODO: Add shallow pull function which returns descriptor diff --git a/daemon/images/images.go b/daemon/images/images.go index d17a2e245576c..5f70a4b9fb152 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -1,17 +1,23 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "encoding/json" "fmt" "sort" + "strings" "time" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/system" @@ -44,16 +50,12 @@ func (i 
*ImageService) Map() map[image.ID]*image.Image { // named all controls whether all images in the graph are filtered, or just // the heads. func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { - var ( - allImages map[image.ID]*image.Image - err error - danglingOnly = false - ) - + ctx := context.TODO() if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { return nil, err } + danglingOnly := false if imageFilters.Contains("dangling") { if imageFilters.ExactMatch("dangling", "true") { danglingOnly = true @@ -61,14 +63,10 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr return nil, invalidFilter{"dangling", imageFilters.Get("dangling")} } } - if danglingOnly { - allImages = i.imageStore.Heads() - } else { - allImages = i.imageStore.Map() - } var beforeFilter, sinceFilter *image.Image - err = imageFilters.WalkValues("before", func(value string) error { + err := imageFilters.WalkValues("before", func(value string) error { + var err error beforeFilter, err = i.GetImage(value) return err }) @@ -77,6 +75,7 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr } err = imageFilters.WalkValues("since", func(value string) error { + var err error sinceFilter, err = i.GetImage(value) return err }) @@ -84,176 +83,262 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr return nil, err } - images := []*types.ImageSummary{} - var imagesMap map[*image.Image]*types.ImageSummary - var layerRefs map[layer.ChainID]int - var allLayers map[layer.ChainID]layer.Layer - var allContainers []*container.Container - - for id, img := range allImages { - if beforeFilter != nil { - if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { - continue - } + var filters []string + if danglingOnly { + filters = append(filters, "name~=/sha256:[a-z0-9]+/") + } else if 
imageFilters.Contains("reference") { + for _, v := range imageFilters.Get("reference") { + // TODO: Parse reference, if only partial match then + // use as regex + filters = append(filters, "name=="+v) } + } - if sinceFilter != nil { - if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { - continue + if imageFilters.Contains("label") { + var labels []string + for _, v := range imageFilters.Get("label") { + sv := strings.SplitN(v, "=", 2) + if len(sv) == 2 { + filters = append(filters, fmt.Sprintf("labels.%q==%s", sv[0], sv[1])) + } else { + filters = append(filters, "labels."+sv[0]) } } - if imageFilters.Contains("label") { - // Very old image that do not have image.Config (or even labels) - if img.Config == nil { - continue - } - // We are now sure image.Config is not nil - if !imageFilters.MatchKVList("label", img.Config.Labels) { - continue + labelFilter := strings.Join(labels, ",") + + if len(filters) == 0 { + filters = append(filters, labelFilter) + } else { + for i := range filters { + filters[i] = filters[i] + "," + labelFilter } } + } - // Skip any images with an unsupported operating system to avoid a potential - // panic when indexing through the layerstore. Don't error as we want to list - // the other images. This should never happen, but here as a safety precaution. - if !system.IsOSSupported(img.OperatingSystem()) { - continue - } + allImages, err := i.client.ImageService().List(ctx, filters...) 
+ if err != nil { + return nil, err + } - layerID := img.RootFS.ChainID() - var size int64 - if layerID != "" { - l, err := i.layerStores[img.OperatingSystem()].Get(layerID) - if err != nil { - // The layer may have been deleted between the call to `Map()` or - // `Heads()` and the call to `Get()`, so we just ignore this error - if err == layer.ErrLayerDoesNotExist { + cs := i.client.ContentStore() + m := map[digest.Digest][]images.Image{} + cache := map[digest.Digest]digest.Digest{} + for _, img := range allImages { + if beforeFilter != nil && beforeFilter.Image.Created != nil { + created := img.Labels["docker.io/created"] + if created != "" { + t, err := time.Parse(created, time.RFC3339) + if err == nil && t.Equal(*beforeFilter.Image.Created) || t.After(*beforeFilter.Image.Created) { continue } - return nil, err - } - - size, err = l.Size() - layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) - if err != nil { - return nil, err } } - newImage := newImage(img, size) - - for _, ref := range i.referenceStore.References(id.Digest()) { - if imageFilters.Contains("reference") { - var found bool - var matchErr error - for _, pattern := range imageFilters.Get("reference") { - found, matchErr = reference.FamiliarMatch(pattern, ref) - if matchErr != nil { - return nil, matchErr - } - if found { - break - } - } - if !found { + if sinceFilter != nil && sinceFilter.Image.Created != nil { + created := img.Labels["docker.io/created"] + if created != "" { + t, err := time.Parse(created, time.RFC3339) + if err == nil && t.Equal(*sinceFilter.Image.Created) || t.Before(*sinceFilter.Image.Created) { continue } } - if _, ok := ref.(reference.Canonical); ok { - newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref)) - } - if _, ok := ref.(reference.NamedTagged); ok { - newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref)) - } + } - if newImage.RepoDigests == nil && newImage.RepoTags == nil { - if all || 
len(i.imageStore.Children(id)) == 0 { - if imageFilters.Contains("dangling") && !danglingOnly { - //dangling=false case, so dangling image is not needed - continue - } - if imageFilters.Contains("reference") { // skip images with no references if filtering by reference + config, ok := cache[img.Target.Digest] + + if !ok { + // TODO: Resolve to a config + c, err := images.Config(ctx, cs, img.Target, platforms.Default()) + if err != nil { + if errdefs.IsNotFound(err) { + // TODO: Log this unresolved config continue } - newImage.RepoDigests = []string{"@"} - newImage.RepoTags = []string{":"} - } else { - continue + return nil, err } - } else if danglingOnly && len(newImage.RepoTags) > 0 { + config = c.Digest + } + + m[config] = append(m[config], img) + + // TODO: WTF? + // Skip any images with an unsupported operating system to avoid a potential + // panic when indexing through the layerstore. Don't error as we want to list + // the other images. This should never happen, but here as a safety precaution. 
+ //if !system.IsOSSupported(img.OperatingSystem()) { + // continue + //} + + //var size int64 + // TODO: this seems pretty dumb to do + // Maybe we resolve a config and add size as a config label + //layerID := img.RootFS.ChainID() + //if layerID != "" { + // l, err := i.layerStores[img.OperatingSystem()].Get(layerID) + // if err != nil { + // // The layer may have been deleted between the call to `Map()` or + // // `Heads()` and the call to `Get()`, so we just ignore this error + // if err == layer.ErrLayerDoesNotExist { + // continue + // } + // return nil, err + // } + + // size, err = l.Size() + // layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) + // if err != nil { + // return nil, err + // } + //} + + //newImage := newImage(img, size) + + // TODO: Resolve config blob to get extra metadata + // TODO: Store by target + // TODO: Defer creation of image summary + + //if withExtraAttrs { + // // lazily init variables + // if imagesMap == nil { + // allContainers = i.containers.List() + + // // allLayers is built from all layerstores combined + // allLayers = make(map[layer.ChainID]layer.Layer) + // for _, ls := range i.layerStores { + // layers := ls.Map() + // for k, v := range layers { + // allLayers[k] = v + // } + // } + // imagesMap = make(map[*image.Image]*types.ImageSummary) + // layerRefs = make(map[layer.ChainID]int) + // } + + // // Get container count + // newImage.Containers = 0 + // for _, c := range allContainers { + // if c.ImageID == id { + // newImage.Containers++ + // } + // } + + // // count layer references + // rootFS := *img.RootFS + // rootFS.DiffIDs = nil + // for _, id := range img.RootFS.DiffIDs { + // rootFS.Append(id) + // chid := rootFS.ChainID() + // layerRefs[chid]++ + // if _, ok := allLayers[chid]; !ok { + // return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + // } + // } + // imagesMap[img] = newImage + //} + + //images = append(images, newImage) + } + + imageSums := []*types.ImageSummary{} + //var 
layerRefs map[layer.ChainID]int + //var allLayers map[layer.ChainID]layer.Layer + //var allContainers []*container.Container + + // TODO: For each found image ID, add references + for config, imgs := range m { + newImage := new(types.ImageSummary) + newImage.ID = config.String() + + image, err := i.getImage(ctx, ocispec.Descriptor{Digest: config}) + if err != nil { + // TODO(containerd): log this continue } + if image.Image.Created != nil { + newImage.Created = image.Image.Created.Unix() + } - if withExtraAttrs { - // lazily init variables - if imagesMap == nil { - allContainers = i.containers.List() - - // allLayers is built from all layerstores combined - allLayers = make(map[layer.ChainID]layer.Layer) - for _, ls := range i.layerStores { - layers := ls.Map() - for k, v := range layers { - allLayers[k] = v - } - } - imagesMap = make(map[*image.Image]*types.ImageSummary) - layerRefs = make(map[layer.ChainID]int) + // TODO: Fill this in from config and content labels + //newImage.ParentID = image.Parent.String() + //newImage.Size = size + //newImage.VirtualSize = size + //newImage.SharedSize = -1 + //newImage.Containers = -1 + //if image.Config != nil { + // newImage.Labels = image.Config.Labels + //} + + // TODO: Add each image reference + // For these, unique them by manifest, none:none or none@digest + digests := map[string]struct{}{} + tags := map[string]struct{}{} + + for _, img := range imgs { + ref, err := reference.Parse(img.Name) + if err != nil { + continue } - - // Get container count - newImage.Containers = 0 - for _, c := range allContainers { - if c.ImageID == id { - newImage.Containers++ + if named, ok := ref.(reference.Named); ok { + if c, ok := named.(reference.Canonical); ok { + digests[reference.FamiliarString(c)] = struct{}{} + } else if t, ok := named.(reference.Tagged); ok { + tags[reference.FamiliarString(t)] = struct{}{} } - } - // count layer references - rootFS := *img.RootFS - rootFS.DiffIDs = nil - for _, id := range img.RootFS.DiffIDs { 
- rootFS.Append(id) - chid := rootFS.ChainID() - layerRefs[chid]++ - if _, ok := allLayers[chid]; !ok { - return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + switch img.Target.MediaType { + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + // digest references only refer to manifests + default: + digests[reference.FamiliarName(named)+"@"+img.Target.Digest.String()] = struct{}{} } } - imagesMap[img] = newImage } - images = append(images, newImage) + for d := range digests { + newImage.RepoDigests = append(newImage.RepoDigests, d) + } + for t := range tags { + newImage.RepoTags = append(newImage.RepoTags, t) + } + + if len(newImage.RepoDigests) == 0 { + newImage.RepoDigests = []string{"none@none"} + } + if len(newImage.RepoTags) == 0 { + newImage.RepoTags = []string{"none:none"} + } + + imageSums = append(imageSums, newImage) } - if withExtraAttrs { - // Get Shared sizes - for img, newImage := range imagesMap { - rootFS := *img.RootFS - rootFS.DiffIDs = nil + //if withExtraAttrs { + // // Get Shared sizes + // for img, newImage := range imagesMap { + // rootFS := *img.RootFS + // rootFS.DiffIDs = nil - newImage.SharedSize = 0 - for _, id := range img.RootFS.DiffIDs { - rootFS.Append(id) - chid := rootFS.ChainID() + // newImage.SharedSize = 0 + // for _, id := range img.RootFS.DiffIDs { + // rootFS.Append(id) + // chid := rootFS.ChainID() - diffSize, err := allLayers[chid].DiffSize() - if err != nil { - return nil, err - } + // diffSize, err := allLayers[chid].DiffSize() + // if err != nil { + // return nil, err + // } - if layerRefs[chid] > 1 { - newImage.SharedSize += diffSize - } - } - } - } + // if layerRefs[chid] > 1 { + // newImage.SharedSize += diffSize + // } + // } + // } + //} - sort.Sort(sort.Reverse(byCreated(images))) + sort.Sort(sort.Reverse(byCreated(imageSums))) - return images, nil + return imageSums, nil } // SquashImage creates a new image with the diff of the specified image and the specified parent. 
@@ -352,8 +437,8 @@ func newImage(image *image.Image, size int64) *types.ImageSummary { newImage.VirtualSize = size newImage.SharedSize = -1 newImage.Containers = -1 - if image.Config != nil { - newImage.Labels = image.Config.Labels + if image.V1Image.Config != nil { + newImage.Labels = image.V1Image.Config.Labels } return newImage } diff --git a/image/image.go b/image/image.go index 079ecb813172a..d0bfcec3d2b0e 100644 --- a/image/image.go +++ b/image/image.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/dockerversion" "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ID is the content-addressable ID of an image. @@ -61,18 +62,36 @@ type V1Image struct { // Image stores the image configuration type Image struct { + // DEPRECATED FOR OCI V1Image - Parent ID `json:"parent,omitempty"` - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` + + // Config is the descriptor for the image configuration + Config ocispec.Descriptor + + // Image is the image configuration + Image *ocispec.Image + + // References refers to known manifests which reference this image + References []ocispec.Descriptor + + // TODO(containerd): this can be a digest to another config + Parent ID `json:"parent,omitempty"` + + // DEPRECATED: in OCI image + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + + // DEPRECATED: now in config platform + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` // rawJSON caches the immutable JSON associated with this image. + // DEPRECATED: use content store + config digest rawJSON []byte // computedID is the ID computed from the hash of the image config. // Not to be confused with the legacy V1 ID in V1Image. 
+ // DEPRECATED: now config digest computedID ID } @@ -93,7 +112,7 @@ func (img *Image) ImageID() string { // RunConfig returns the image's container config. func (img *Image) RunConfig() *container.Config { - return img.Config + return img.V1Image.Config } // BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. From 0fd1042a35b663df3a319645c467539c52324f6e Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 26 Oct 2018 17:44:36 -0700 Subject: [PATCH 08/73] Use containerd to create tags Signed-off-by: Derek McGowan --- api/server/backend/build/backend.go | 3 ++- api/server/backend/build/tag.go | 7 +++++- daemon/images/image_import.go | 11 +++++----- daemon/images/image_tag.go | 33 +++++++++++++++++++++-------- 4 files changed, 38 insertions(+), 16 deletions(-) diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go index 01780916950e7..d2325cd64505a 100644 --- a/api/server/backend/build/backend.go +++ b/api/server/backend/build/backend.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/builder/fscache" "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" "google.golang.org/grpc" @@ -20,7 +21,7 @@ import ( // ImageComponent provides an interface for working with images type ImageComponent interface { SquashImage(from string, to string) (string, error) - TagImageWithReference(image.ID, reference.Named) error + TagImageWithReference(ocispec.Descriptor, reference.Named) error } // Builder defines interface for running a build diff --git a/api/server/backend/build/tag.go b/api/server/backend/build/tag.go index f840b9d726076..18d688ba6fccc 100644 --- a/api/server/backend/build/tag.go +++ b/api/server/backend/build/tag.go @@ -6,6 +6,8 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/image" + digest 
"github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -33,7 +35,10 @@ func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagge // TagImages creates image tags for the imageID func (bt *Tagger) TagImages(imageID image.ID) error { for _, rt := range bt.repoAndTags { - if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil { + desc := ocispec.Descriptor{ + Digest: digest.Digest(imageID), + } + if err := bt.imageComponent.TagImageWithReference(desc, rt); err != nil { return err } fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index 8d54e0704fe77..af0cafde1f9aa 100644 --- a/daemon/images/image_import.go +++ b/daemon/images/image_import.go @@ -120,17 +120,18 @@ func (i *ImageService) ImportImage(src string, repository, os string, tag string return err } + // TODO(containerd): Use content store + image store id, err := i.imageStore.Create(imgConfig) if err != nil { return err } // FIXME: connect with commit code and call refstore directly - if newRef != nil { - if err := i.TagImageWithReference(id, newRef); err != nil { - return err - } - } + //if newRef != nil { + // if err := i.TagImageWithReference(dgst, newRef); err != nil { + // return err + // } + //} i.LogImageEvent(id.String(), id.String(), "import") outStream.Write(streamformatter.FormatStatus("", id.String())) diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 4693611c3a199..29e185ebab4cd 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -1,18 +1,30 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + + "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" - "github.com/docker/docker/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + 
"github.com/pkg/errors" ) // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { + // TODO(containerd): Lookup existing image descriptor img, err := i.GetImage(imageName) if err != nil { return "", err } + var target ocispec.Descriptor + if len(img.References) > 0 { + target = img.References[0] + } else { + target = img.Config + } + newTag, err := reference.ParseNormalizedNamed(repository) if err != nil { return "", err @@ -23,19 +35,22 @@ func (i *ImageService) TagImage(imageName, repository, tag string) (string, erro } } - err = i.TagImageWithReference(img.ID(), newTag) + err = i.TagImageWithReference(target, newTag) return reference.FamiliarString(newTag), err } // TagImageWithReference adds the given reference to the image ID provided. -func (i *ImageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { - return err +func (i *ImageService) TagImageWithReference(target ocispec.Descriptor, newTag reference.Named) error { + img := images.Image{ + Name: newTag.String(), + Target: target, } - - if err := i.imageStore.SetLastUpdated(imageID); err != nil { - return err + is := i.client.ImageService() + _, err := is.Create(context.TODO(), img) + if err != nil { + return errors.Wrap(err, "failed to create image") } - i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") + // TODO(containerd): Set last updated for target + i.LogImageEvent(target.Digest.String(), reference.FamiliarString(newTag), "tag") return nil } From dc92057410faf45541843e5ab32db167484565e9 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 30 Oct 2018 17:05:58 -0700 Subject: [PATCH 09/73] Add image cache Use cache for delete and listing images Signed-off-by: Derek McGowan --- 
api/server/router/image/backend.go | 4 +- api/server/router/image/image_routes.go | 4 +- daemon/daemon.go | 6 + daemon/disk_usage.go | 2 +- daemon/images/cache.go | 202 +++++++++++++++++++++++- daemon/images/image.go | 72 +++++++++ daemon/images/image_delete.go | 202 ++++++++++++------------ daemon/images/image_prune.go | 4 +- daemon/images/image_pull.go | 32 +++- daemon/images/images.go | 44 ++---- daemon/images/service.go | 9 ++ 11 files changed, 437 insertions(+), 144 deletions(-) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 5837f9a9bcdfd..a8187a4b42132 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -20,9 +20,9 @@ type Backend interface { } type imageBackend interface { - ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) + ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) - Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + Images(ctx context.Context, imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) LookupImage(name string) (*types.ImageInspect, error) TagImage(imageName, repository, tag string) (string, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 41994bbef9d4d..105111047115e 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -204,7 +204,7 @@ func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r force := httputils.BoolValue(r, "force") prune := !httputils.BoolValue(r, "noprune") - list, err := s.backend.ImageDelete(name, force, prune) + list, err := s.backend.ImageDelete(ctx, name, force, 
prune) if err != nil { return err } @@ -237,7 +237,7 @@ func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, imageFilters.Add("reference", filterParam) } - images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) + images, err := s.backend.Images(ctx, imageFilters, httputils.BoolValue(r, "all"), false) if err != nil { return err } diff --git a/daemon/daemon.go b/daemon/daemon.go index fb0dcc012858c..c157700ebaa98 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -24,6 +24,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" @@ -1046,6 +1047,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S RegistryService: registryService, }) + // TODO(containerd): create earlier, background, and wait at end + if err := d.imageService.LoadCache(namespaces.WithNamespace(ctx, ContainersNamespace)); err != nil { + return nil, errors.Wrap(err, "failed to load image cache from containerd") + } + go d.execCommandGC() d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), ContainersNamespace, d) diff --git a/daemon/disk_usage.go b/daemon/disk_usage.go index 5bec60d174142..0e1b6e1f4617d 100644 --- a/daemon/disk_usage.go +++ b/daemon/disk_usage.go @@ -26,7 +26,7 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er } // Get all top images with extra attributes - allImages, err := daemon.imageService.Images(filters.NewArgs(), false, true) + allImages, err := daemon.imageService.Images(ctx, filters.NewArgs(), false, true) if err != nil { return nil, fmt.Errorf("failed to retrieve image list: %v", err) } diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 
3b433106e8a8b..cd34eb6cf0bf1 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -1,18 +1,212 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "sync" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/digestset" + "github.com/docker/distribution/reference" "github.com/docker/docker/builder" - "github.com/docker/docker/image/cache" + buildcache "github.com/docker/docker/image/cache" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) -// MakeImageCache creates a stateful image cache. +type cachedImage struct { + id digest.Digest + parent digest.Digest + + // Mutable values + m sync.Mutex + references []reference.Named + children []digest.Digest +} + +type cache struct { + m sync.RWMutex + // idCache maps Docker identifiers + idCache map[digest.Digest]*cachedImage + // dCache maps target digests to images + tCache map[digest.Digest]*cachedImage + ids *digestset.Set +} + +func (c *cache) byID(id digest.Digest) *cachedImage { + c.m.RLock() + img, ok := c.idCache[id] + c.m.RUnlock() + if !ok { + return nil + } + return img +} + +func (c *cache) byTarget(target digest.Digest) *cachedImage { + c.m.RLock() + img, ok := c.tCache[target] + c.m.RUnlock() + if !ok { + return nil + } + return img +} + +// LoadCache loads the image cache by scanning containerd images +// and listening to containerd events +// This process can be backgrounded and will hold hold the cache +// lock until fully populated. 
+func (i *ImageService) LoadCache(ctx context.Context) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + _, err = i.loadNSCache(ctx, namespace) + return err +} + +func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cache, error) { + i.cacheL.Lock() + defer i.cacheL.Unlock() + + c := &cache{ + idCache: map[digest.Digest]*cachedImage{}, + tCache: map[digest.Digest]*cachedImage{}, + ids: digestset.NewSet(), + } + + is := i.client.ImageService() + + // TODO(containerd): This must use some streaming approach + imgs, err := is.List(ctx) + if err != nil { + return nil, err + } + + for _, img := range imgs { + ref, err := reference.Parse(img.Name) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image name") + continue + } + + named, hasName := ref.(reference.Named) + + ci := c.tCache[img.Target.Digest] + if ci == nil { + var id digest.Digest + if !hasName { + digested, ok := ref.(reference.Digested) + if ok { + id = digested.Digest() + } + } + if img.Target.MediaType == images.MediaTypeDockerSchema2Config || img.Target.MediaType == ocispec.MediaTypeImageConfig { + id = img.Target.Digest + + } + if id == "" { + idstr, ok := img.Labels[LabelImageID] + if !ok { + cs := i.client.ContentStore() + // TODO(containerd): resolve architecture from context + platform := platforms.Default() + desc, err := images.Config(ctx, cs, img.Target, platform) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("TODO: no label") + continue + } + id = desc.Digest + } else { + id, err = digest.Parse(idstr) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image id label") + continue + } + } + } + + ci = c.idCache[id] + if ci == nil { + ci = &cachedImage{ + id: id, + } + if s := img.Labels[LabelImageParent]; s != "" { + pid, err := digest.Parse(s) + if err != nil { + 
log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid parent label") + } else { + ci.parent = pid + } + } + + c.idCache[id] = ci + } + c.tCache[img.Target.Digest] = ci + } + if hasName { + ci.addReference(named) + } + } + i.cache[namespace] = c + + return c, nil +} + +func (ci *cachedImage) addReference(named reference.Named) { + var ( + i int + s = named.String() + ) + + // Add references, add in sorted place + for ; i < len(ci.references); i++ { + if rs := ci.references[i].String(); s < rs { + ci.references = append(ci.references, nil) + copy(ci.references[i+1:], ci.references[i:]) + ci.references[i] = named + break + } else if rs == s { + break + } + } + if i == len(ci.references) { + ci.references = append(ci.references, named) + } +} + +func (i *ImageService) getCache(ctx context.Context) (c *cache, err error) { + namespace, ok := namespaces.Namespace(ctx) + if !ok { + // Default namespace + // TODO(containerd): define default in service + namespace = "" + } + i.cacheL.RLock() + c, ok = i.cache[namespace] + i.cacheL.RUnlock() + if !ok { + c, err = i.loadNSCache(ctx, namespace) + if err != nil { + return nil, err + } + } + + return c, nil +} + +// MakeImageCache creates a stateful image cache for build. 
func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { if len(sourceRefs) == 0 { - return cache.NewLocal(i.imageStore) + return buildcache.NewLocal(i.imageStore) } - cache := cache.New(i.imageStore) + cache := buildcache.New(i.imageStore) for _, ref := range sourceRefs { img, err := i.GetImage(ref) diff --git a/daemon/images/image.go b/daemon/images/image.go index 79c0e5897adbe..d86129b0f81ca 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -6,15 +6,22 @@ import ( "fmt" "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" + digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) +const ( + LabelImageID = "docker.io/image.id" + LabelImageParent = "docker.io/image.parent" +) + // ErrImageDoesNotExist is error returned when no image can be found for a reference. 
type ErrImageDoesNotExist struct { ref reference.Reference @@ -114,3 +121,68 @@ func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) Image: &img, }, nil } + +func (i *ImageService) getReferences(ctx context.Context, imageID digest.Digest) ([]reference.Named, error) { + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } + img := c.byID(imageID) + if img == nil { + return nil, errdefs.NotFound(errors.New("no image with given id")) + } + + return img.references, nil +} + +func (i *ImageService) getCachedRef(ctx context.Context, ref string) (*cachedImage, error) { + parsed, err := reference.ParseAnyReference(ref) + if err != nil { + return nil, err + } + + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } + + c.m.RLock() + defer c.m.RUnlock() + + namedRef, ok := parsed.(reference.Named) + if !ok { + digested, ok := parsed.(reference.Digested) + if !ok { + return nil, errdefs.InvalidParameter(errors.New("bad reference")) + } + + ci, ok := c.idCache[digested.Digest()] + if !ok { + return nil, errdefs.NotFound(errors.New("id not found")) + } + return ci, nil + } + + img, err := i.client.ImageService().Get(ctx, namedRef.String()) + if err != nil { + if !cerrdefs.IsNotFound(err) { + return nil, err + } + dgst, err := c.ids.Lookup(ref) + if err != nil { + return nil, errdefs.NotFound(errors.New("reference not found")) + } + ci, ok := c.idCache[dgst] + if !ok { + return nil, errdefs.NotFound(errors.New("id not found")) + } + return ci, nil + } + ci, ok := c.tCache[img.Target.Digest] + if !ok { + // TODO(containerd): Update cache and return + return nil, errdefs.NotFound(errors.New("id not found")) + } + + return ci, nil +} diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 94d6f872dda54..0dc99f888ad6d 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -1,17 +1,19 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "fmt" 
"strings" "time" + "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -60,25 +62,22 @@ const ( // meaning any delete conflicts will cause the image to not be deleted and the // conflict will not be reported. // -func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { +func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { start := time.Now() - records := []types.ImageDeleteResponseItem{} - img, err := i.GetImage(imageRef) + img, err := i.getCachedRef(ctx, imageRef) if err != nil { return nil, err } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, errors.Errorf("unable to delete image: %q", system.ErrNotSupportedOperatingSystem) - } - imgID := img.ID() - repoRefs := i.referenceStore.References(imgID.Digest()) + imgID := img.id + repoRefs := img.references using := func(c *container.Container) bool { - return c.ImageID == imgID + return digest.Digest(c.ImageID) == imgID } + var deletedRefs []reference.Named var removedRepositoryRef bool if !isImageIDPrefix(imgID.String(), imageRef) { // A repository reference was given and should be removed @@ -101,17 +100,8 @@ func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types. 
return nil, err } - parsedRef, err = i.removeImageRef(parsedRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - + deletedRefs = append(deletedRefs, parsedRef) i.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - - repoRefs = i.referenceStore.References(imgID.Digest()) // If a tag reference was removed and the only remaining // references to the same repository are digest references, @@ -119,6 +109,9 @@ func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types. if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { foundRepoTagRef := false for _, repoRef := range repoRefs { + if parsedRef.String() == repoRef.String() { + continue + } if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { foundRepoTagRef = true break @@ -126,26 +119,26 @@ func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types. } if !foundRepoTagRef { // Remove canonical references from same repository - var remainingRefs []reference.Named for _, repoRef := range repoRefs { + if parsedRef.String() == repoRef.String() { + continue + } if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - if _, err := i.removeImageRef(repoRef); err != nil { - return records, err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)} - records = append(records, untaggedRecord) - } else { - remainingRefs = append(remainingRefs, repoRef) - + // TODO(containerd): can repoRef be name only here? 
+ deletedRefs = append(deletedRefs, repoRef) } } - repoRefs = remainingRefs } } // If it has remaining references then the untag finished the remove - if len(repoRefs) > 0 { + if len(repoRefs)-len(deletedRefs) > 0 { + // Remove all references in containerd + // Do not wait for containerd's garbage collection + records, err := i.removeImageRefs(ctx, deletedRefs, false) + if err != nil { + return nil, errors.Wrap(err, "failed to delete refs") + } return records, nil } @@ -164,20 +157,17 @@ func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types. } for _, repoRef := range repoRefs { - parsedRef, err := i.removeImageRef(repoRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - + // TODO(containerd): can repoRef be name only here? + deletedRefs = append(deletedRefs, repoRef) i.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) } } } - if err := i.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + // TODO(containerd): Lock, perform deletion, + // check if image exists then delete layers + records, err := i.imageDeleteHelper(ctx, img, repoRefs, force, prune, removedRepositoryRef) + if err != nil { return nil, err } @@ -225,43 +215,32 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool { return false } -// removeImageRef attempts to parse and remove the given image reference from -// this daemon's store of repository tag/digest references. The given -// repositoryRef must not be an image ID but a repository name followed by an -// optional tag or digest reference. If tag or digest is omitted, the default -// tag is used. Returns the resolved image reference and an error. 
-func (i *ImageService) removeImageRef(ref reference.Named) (reference.Named, error) { - ref = reference.TagNameOnly(ref) - - // Ignore the boolean value returned, as far as we're concerned, this - // is an idempotent operation and it's okay if the reference didn't - // exist in the first place. - _, err := i.referenceStore.Delete(ref) - - return ref, err -} +// removeImageRefs removes a set of image references +// if the sync flag is set then garbage collection is +// is completed before returning +func (i *ImageService) removeImageRefs(ctx context.Context, refs []reference.Named, sync bool) ([]types.ImageDeleteResponseItem, error) { + records := []types.ImageDeleteResponseItem{} + // TODO(containerd): clear from cache, get cache from arguments -// removeAllReferencesToImageID attempts to remove every reference to the given -// imgID from this daemon's store of repository tag/digest references. Returns -// on the first encountered error. Removed references are logged to this -// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the -// given list of records. -func (i *ImageService) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { - imageRefs := i.referenceStore.References(imgID.Digest()) + is := i.client.ImageService() - for _, imageRef := range imageRefs { - parsedRef, err := i.removeImageRef(imageRef) - if err != nil { - return err + for i, ref := range refs { + opts := []images.DeleteOpt{} + if sync && i == len(refs)-1 { + opts = append(opts, images.SynchronousDelete()) + } + if err := is.Delete(ctx, ref.String(), opts...); err != nil && !errdefs.IsNotFound(err) { + return records, errors.Wrapf(err, "failed to delete ref: %s", ref.String()) } - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} + // TODO(containerd): do this? 
+ //i.LogImageEvent(imgID.String(), imgID.String(), "untag") - i.LogImageEvent(imgID.String(), imgID.String(), "untag") - *records = append(*records, untaggedRecord) + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(ref)} + records = append(records, untaggedRecord) } - return nil + return records, nil } // ImageDeleteConflict holds a soft or hard conflict and an associated error. @@ -297,49 +276,59 @@ func (idc *imageDeleteConflict) Conflict() {} // conflict is encountered, it will be returned immediately without deleting // the image. If quiet is true, any encountered conflicts will be ignored and // the function will return nil immediately without deleting the image. -func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { +func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, repoRefs []reference.Named, force, prune, quiet bool) ([]types.ImageDeleteResponseItem, error) { + // TODO(containerd): lock deletion, make reference removal and checks transactional in the cache? + // First, determine if this image has any conflicts. Ignore soft conflicts // if force is true. c := conflictHard if !force { c |= conflictSoft } - if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { - if quiet && (!i.imageIsDangling(imgID) || conflict.used) { + if conflict := i.checkImageDeleteConflict(img.id, c); conflict != nil { + if quiet && (!i.imageIsDangling(img.id) || conflict.used) { // Ignore conflicts UNLESS the image is "dangling" or not being used in // which case we want the user to know. - return nil + return nil, nil } // There was a conflict and it's either a hard conflict OR we are not // forcing deletion on soft conflicts. 
- return conflict - } - - parent, err := i.imageStore.GetParent(imgID) - if err != nil { - // There may be no parent - parent = "" + return nil, conflict } // Delete all repository tag/digest references to this image. - if err := i.removeAllReferencesToImageID(imgID, records); err != nil { - return err + records, err := i.removeImageRefs(ctx, repoRefs, true) + if err != nil { + return records, err } - removedLayers, err := i.imageStore.Delete(imgID) + // NOTE(containerd): GC can do this in the future + // TODO(containerd): Move this function locally, to track and release layers + // Walk layers and remove reference + removedLayers, err := i.imageStore.Delete(image.ID(img.id)) if err != nil { - return err + return records, err } - i.LogImageEvent(imgID.String(), imgID.String(), "delete") - *records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()}) + i.LogImageEvent(img.id.String(), img.id.String(), "delete") + records = append(records, types.ImageDeleteResponseItem{Deleted: img.id.String()}) for _, removedLayer := range removedLayers { - *records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) + records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) + } + + var parent *cachedImage + if img.parent != "" { + // TODO(containerd): pass cache in + c, err := i.getCache(ctx) + if err != nil { + return records, err + } + parent = c.byID(img.parent) } - if !prune || parent == "" { - return nil + if !prune || parent == nil { + return records, nil } // We need to prune the parent image. This means delete it if there are @@ -347,7 +336,8 @@ func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageD // either running or stopped). // Do not force prunings, but do so quietly (stopping on any encountered // conflicts). 
- return i.imageDeleteHelper(parent, records, false, true, true) + parentRecords, err := i.imageDeleteHelper(ctx, parent, nil, false, true, true) + return append(records, parentRecords...), nil } // checkImageDeleteConflict determines whether there are any conflicts @@ -356,12 +346,13 @@ func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageD // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. -func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { +func (i *ImageService) checkImageDeleteConflict(imgID digest.Digest, mask conflictType) *imageDeleteConflict { // Check if the image has any descendant images. - if mask&conflictDependentChild != 0 && len(i.imageStore.Children(imgID)) > 0 { + // TODO(containerd): No use of image store + if mask&conflictDependentChild != 0 && len(i.imageStore.Children(image.ID(imgID))) > 0 { return &imageDeleteConflict{ hard: true, - imgID: imgID, + imgID: image.ID(imgID), message: "image has dependent child images", } } @@ -369,11 +360,11 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp if mask&conflictRunningContainer != 0 { // Check if any running container is using the image. running := func(c *container.Container) bool { - return c.IsRunning() && c.ImageID == imgID + return c.IsRunning() && digest.Digest(c.ImageID) == imgID } if container := i.containers.First(running); container != nil { return &imageDeleteConflict{ - imgID: imgID, + imgID: image.ID(imgID), hard: true, used: true, message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), @@ -382,9 +373,10 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp } // Check if any repository tags/digest reference this image. 
- if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID.Digest())) > 0 { + // TODO(containerd): No use of reference store + if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID)) > 0 { return &imageDeleteConflict{ - imgID: imgID, + imgID: image.ID(imgID), message: "image is referenced in multiple repositories", } } @@ -392,11 +384,11 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp if mask&conflictStoppedContainer != 0 { // Check if any stopped containers reference this image. stopped := func(c *container.Container) bool { - return !c.IsRunning() && c.ImageID == imgID + return !c.IsRunning() && digest.Digest(c.ImageID) == imgID } if container := i.containers.First(stopped); container != nil { return &imageDeleteConflict{ - imgID: imgID, + imgID: image.ID(imgID), used: true, message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), } @@ -409,6 +401,10 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp // imageIsDangling returns whether the given image is "dangling" which means // that there are no repository references to the given image and it has no // child images. 
-func (i *ImageService) imageIsDangling(imgID image.ID) bool { - return !(len(i.referenceStore.References(imgID.Digest())) > 0 || len(i.imageStore.Children(imgID)) > 0) +func (i *ImageService) imageIsDangling(imgID digest.Digest) bool { + // TODO(containerd): No use of reference store + // TODO(containerd): No use of image store + // To find children, Docker keeps a cache of images along with parents, it + // can also keep a backpointer to parents in memory + return !(len(i.referenceStore.References(imgID)) > 0 || len(i.imageStore.Children(image.ID(imgID))) > 0) } diff --git a/daemon/images/image_prune.go b/daemon/images/image_prune.go index 3cb9f0b53c611..f1accfded1676 100644 --- a/daemon/images/image_prune.go +++ b/daemon/images/image_prune.go @@ -122,7 +122,7 @@ deleteImagesLoop: if shouldDelete { for _, ref := range refs { - imgDel, err := i.ImageDelete(ref.String(), false, true) + imgDel, err := i.ImageDelete(ctx, ref.String(), false, true) if imageDeleteFailed(ref.String(), err) { continue } @@ -131,7 +131,7 @@ deleteImagesLoop: } } else { hex := id.Digest().Hex() - imgDel, err := i.ImageDelete(hex, false, true) + imgDel, err := i.ImageDelete(ctx, hex, false, true) if imageDeleteFailed(hex, err) { continue } diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 2da60d980fa15..5a1214f5ded48 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/errdefs" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // PullImage initiates a pull operation. 
image is the repository name to pull, and @@ -48,6 +49,11 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor } func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + c, err := i.getCache(ctx) + if err != nil { + return err + } + // Include a buffer so that slow client connections don't affect // transfer performance. //progressChan := make(chan progress.Progress, 100) @@ -66,12 +72,34 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO: progress tracking // TODO: unpack tracking, use download manager for now? - // TODO: keep image - _, err := i.client.Pull(ctx, ref.String(), opts...) + img, err := i.client.Pull(ctx, ref.String(), opts...) + + config, err := img.Config(ctx) + if err != nil { + return errors.Wrap(err, "failed to resolve configuration") + } // TODO: Unpack into layer store // TODO: only unpack image types (does containerd already do this?) 
+ // TODO: Update image with ID label + // TODO(containerd): Create manifest reference and add image + + c.m.Lock() + ci, ok := c.idCache[config.Digest] + if ok { + ci.addReference(ref) + // TODO: Add manifest digest ref + } else { + ci = &cachedImage{ + id: config.Digest, + references: []reference.Named{ref}, + } + c.idCache[config.Digest] = ci + } + c.tCache[img.Target().Digest] = ci + c.m.Unlock() + //go func() { // progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) // close(writesDone) diff --git a/daemon/images/images.go b/daemon/images/images.go index 5f70a4b9fb152..3aa1db75aa2ff 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -9,7 +9,7 @@ import ( "time" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/log" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -17,7 +17,6 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/system" @@ -49,8 +48,12 @@ func (i *ImageService) Map() map[image.ID]*image.Image { // filter is a shell glob string applied to repository names. The argument // named all controls whether all images in the graph are filtered, or just // the heads. 
-func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { - ctx := context.TODO() +func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { return nil, err } @@ -65,7 +68,7 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr } var beforeFilter, sinceFilter *image.Image - err := imageFilters.WalkValues("before", func(value string) error { + err = imageFilters.WalkValues("before", func(value string) error { var err error beforeFilter, err = i.GetImage(value) return err @@ -121,9 +124,9 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr return nil, err } - cs := i.client.ContentStore() m := map[digest.Digest][]images.Image{} - cache := map[digest.Digest]digest.Digest{} + + c.m.RLock() for _, img := range allImages { if beforeFilter != nil && beforeFilter.Image.Created != nil { created := img.Labels["docker.io/created"] @@ -146,30 +149,14 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr } - config, ok := cache[img.Target.Digest] - + ci, ok := c.tCache[img.Target.Digest] if !ok { - // TODO: Resolve to a config - c, err := images.Config(ctx, cs, img.Target, platforms.Default()) - if err != nil { - if errdefs.IsNotFound(err) { - // TODO: Log this unresolved config - continue - } - return nil, err - } - config = c.Digest + // TODO(containerd): Lookup config and update cache + log.G(ctx).WithField("name", img.Name).Debugf("skipping non-cached image") + continue } - m[config] = append(m[config], img) - - // TODO: WTF? - // Skip any images with an unsupported operating system to avoid a potential - // panic when indexing through the layerstore. 
Don't error as we want to list - // the other images. This should never happen, but here as a safety precaution. - //if !system.IsOSSupported(img.OperatingSystem()) { - // continue - //} + m[ci.id] = append(m[ci.id], img) //var size int64 // TODO: this seems pretty dumb to do @@ -240,6 +227,7 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr //images = append(images, newImage) } + c.m.RUnlock() imageSums := []*types.ImageSummary{} //var layerRefs map[layer.ChainID]int diff --git a/daemon/images/service.go b/daemon/images/service.go index 4ffaceb9b8ed2..aed14312578a6 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -4,6 +4,7 @@ import ( "context" "os" "runtime" + "sync" "github.com/containerd/containerd" "github.com/docker/docker/container" @@ -31,6 +32,7 @@ type containerStore interface { // ImageServiceConfig is the configuration used to create a new ImageService type ImageServiceConfig struct { + DefaultNamespace string Client *containerd.Client ContainerStore containerStore DistributionMetadataStore metadata.Store @@ -48,10 +50,12 @@ func NewImageService(config ImageServiceConfig) *ImageService { logrus.Debugf("Max Concurrent Downloads: %d", config.MaxConcurrentDownloads) logrus.Debugf("Max Concurrent Uploads: %d", config.MaxConcurrentUploads) return &ImageService{ + namespace: config.DefaultNamespace, client: config.Client, containers: config.ContainerStore, distributionMetadataStore: config.DistributionMetadataStore, downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), + cache: map[string]*cache{}, eventsService: config.EventsService, imageStore: config.ImageStore, layerStores: config.LayerStores, @@ -63,12 +67,17 @@ func NewImageService(config ImageServiceConfig) *ImageService { // ImageService provides a backend for image management type ImageService struct { + namespace string client *containerd.Client containers containerStore eventsService 
*daemonevents.Events layerStores map[string]layer.Store // By operating system pruneRunning int32 + // namespaced cache + cache map[string]*cache + cacheL sync.RWMutex + // To be replaced by containerd client registryService registry.Service referenceStore dockerreference.Store From fd0f8fbbb9d45e8f97fae80c169baf9a94407fa5 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 6 Dec 2018 14:20:22 -0800 Subject: [PATCH 10/73] Add image tagging by id and reference Signed-off-by: Derek McGowan --- api/server/backend/build/backend.go | 9 ++- api/server/backend/build/tag.go | 10 +-- api/server/router/container/backend.go | 2 +- .../router/container/container_routes.go | 2 +- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- daemon/commit.go | 5 +- daemon/images/cache.go | 51 ++++++++---- daemon/images/image.go | 78 ++++++++++++++++--- daemon/images/image_delete.go | 12 +-- daemon/images/image_pull.go | 2 +- daemon/images/image_tag.go | 49 ++++++++---- daemon/images/images.go | 2 +- 13 files changed, 162 insertions(+), 64 deletions(-) diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go index d2325cd64505a..2ca984ee123ac 100644 --- a/api/server/backend/build/backend.go +++ b/api/server/backend/build/backend.go @@ -10,8 +10,8 @@ import ( "github.com/docker/docker/builder" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/fscache" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" + digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -21,7 +21,7 @@ import ( // ImageComponent provides an interface for working with images type ImageComponent interface { SquashImage(from string, to string) (string, error) - TagImageWithReference(ocispec.Descriptor, reference.Named) error + TagImageWithReference(context.Context, ocispec.Descriptor, 
reference.Named) error } // Builder defines interface for running a build @@ -93,7 +93,10 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) } if imageID != "" { - err = tagger.TagImages(image.ID(imageID)) + err = tagger.TagImages(ctx, ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.Digest(imageID), + }) } return imageID, err } diff --git a/api/server/backend/build/tag.go b/api/server/backend/build/tag.go index 18d688ba6fccc..760ce7ed58369 100644 --- a/api/server/backend/build/tag.go +++ b/api/server/backend/build/tag.go @@ -1,12 +1,11 @@ package build // import "github.com/docker/docker/api/server/backend/build" import ( + "context" "fmt" "io" "github.com/docker/distribution/reference" - "github.com/docker/docker/image" - digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -33,12 +32,9 @@ func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagge } // TagImages creates image tags for the imageID -func (bt *Tagger) TagImages(imageID image.ID) error { +func (bt *Tagger) TagImages(ctx context.Context, desc ocispec.Descriptor) error { for _, rt := range bt.repoAndTags { - desc := ocispec.Descriptor{ - Digest: digest.Digest(imageID), - } - if err := bt.imageComponent.TagImageWithReference(desc, rt); err != nil { + if err := bt.imageComponent.TagImageWithReference(ctx, desc, rt); err != nil { return err } fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go index 75ea1d82b76d2..e5a9b48eec92b 100644 --- a/api/server/router/container/backend.go +++ b/api/server/router/container/backend.go @@ -68,7 +68,7 @@ type systemBackend interface { } type commitBackend interface { - CreateImageFromContainer(name string, 
config *backend.CreateImageConfig) (imageID string, err error) + CreateImageFromContainer(ctx context.Context, name string, config *backend.CreateImageConfig) (imageID string, err error) } // Backend is all the methods that need to be implemented to provide container specific functionality. diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go index ed377d9c61f16..720d4be10142a 100644 --- a/api/server/router/container/container_routes.go +++ b/api/server/router/container/container_routes.go @@ -55,7 +55,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter, Changes: r.Form["changes"], } - imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg) + imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), commitCfg) if err != nil { return err } diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index a8187a4b42132..526356bfecf6f 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -24,7 +24,7 @@ type imageBackend interface { ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) Images(ctx context.Context, imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) LookupImage(name string) (*types.ImageInspect, error) - TagImage(imageName, repository, tag string) (string, error) + TagImage(ctx context.Context, imageName, repository, tag string) (string, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 105111047115e..87bfd4d84e81d 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -259,7 +259,7 @@ func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, if err := httputils.ParseForm(r); err != 
nil { return err } - if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + if _, err := s.backend.TagImage(ctx, vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { return err } w.WriteHeader(http.StatusCreated) diff --git a/daemon/commit.go b/daemon/commit.go index f20290f47c649..a91e5cc7ba5b8 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "runtime" "strings" @@ -116,7 +117,7 @@ func merge(userConf, imageConf *containertypes.Config) error { // CreateImageFromContainer creates a new image from a container. The container // config will be updated by applying the change set to the custom config, then // applying that config over the existing container config. -func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateImageConfig) (string, error) { +func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, c *backend.CreateImageConfig) (string, error) { start := time.Now() container, err := daemon.GetContainer(name) if err != nil { @@ -170,7 +171,7 @@ func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateIma var imageRef string if c.Repo != "" { - imageRef, err = daemon.imageService.TagImage(string(id), c.Repo, c.Tag) + imageRef, err = daemon.imageService.TagImage(ctx, string(id), c.Repo, c.Tag) if err != nil { return "", err } diff --git a/daemon/images/cache.go b/daemon/images/cache.go index cd34eb6cf0bf1..dc7a19d25e68d 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -18,7 +18,7 @@ import ( ) type cachedImage struct { - id digest.Digest + config ocispec.Descriptor parent digest.Digest // Mutable values @@ -99,18 +99,21 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach ci := c.tCache[img.Target.Digest] if ci == nil { - var id digest.Digest + var id ocispec.Descriptor if !hasName { 
digested, ok := ref.(reference.Digested) if ok { - id = digested.Digest() + id = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digested.Digest(), + } } } if img.Target.MediaType == images.MediaTypeDockerSchema2Config || img.Target.MediaType == ocispec.MediaTypeImageConfig { - id = img.Target.Digest + id = img.Target } - if id == "" { + if id.Digest == "" { idstr, ok := img.Labels[LabelImageID] if !ok { cs := i.client.ContentStore() @@ -121,20 +124,24 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach log.G(ctx).WithError(err).WithField("name", img.Name).Debug("TODO: no label") continue } - id = desc.Digest + id = desc } else { - id, err = digest.Parse(idstr) + dgst, err := digest.Parse(idstr) if err != nil { log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image id label") continue } + id = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: dgst, + } } } - ci = c.idCache[id] + ci = c.idCache[id.Digest] if ci == nil { ci = &cachedImage{ - id: id, + config: id, } if s := img.Labels[LabelImageParent]; s != "" { pid, err := digest.Parse(s) @@ -145,7 +152,8 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach } } - c.idCache[id] = ci + c.idCache[id.Digest] = ci + c.ids.Add(id.Digest) } c.tCache[img.Target.Digest] = ci } @@ -180,12 +188,29 @@ func (ci *cachedImage) addReference(named reference.Named) { } } +func (ci *cachedImage) addChild(d digest.Digest) { + var i int + + // Add references, add in sorted place + for ; i < len(ci.children); i++ { + if d < ci.children[i] { + ci.children = append(ci.children, "") + copy(ci.children[i+1:], ci.children[i:]) + ci.children[i] = d + break + } else if ci.children[i] == d { + break + } + } + if i == len(ci.children) { + ci.children = append(ci.children, d) + } +} + func (i *ImageService) getCache(ctx context.Context) (c *cache, err error) { namespace, ok := namespaces.Namespace(ctx) if !ok 
{ - // Default namespace - // TODO(containerd): define default in service - namespace = "" + namespace = i.namespace } i.cacheL.RLock() c, ok = i.cache[namespace] diff --git a/daemon/images/image.go b/daemon/images/image.go index d86129b0f81ca..64915784b021b 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -136,14 +136,28 @@ func (i *ImageService) getReferences(ctx context.Context, imageID digest.Digest) } func (i *ImageService) getCachedRef(ctx context.Context, ref string) (*cachedImage, error) { - parsed, err := reference.ParseAnyReference(ref) + img, err := i.getImageByRef(ctx, ref) if err != nil { return nil, err } + return img.cached, nil +} + +type imageLink struct { + name reference.Named + target *ocispec.Descriptor + cached *cachedImage +} + +func (i *ImageService) getImageByRef(ctx context.Context, ref string) (imageLink, error) { + parsed, err := reference.ParseAnyReference(ref) + if err != nil { + return imageLink{}, err + } c, err := i.getCache(ctx) if err != nil { - return nil, err + return imageLink{}, err } c.m.RLock() @@ -153,36 +167,78 @@ func (i *ImageService) getCachedRef(ctx context.Context, ref string) (*cachedIma if !ok { digested, ok := parsed.(reference.Digested) if !ok { - return nil, errdefs.InvalidParameter(errors.New("bad reference")) + return imageLink{}, errdefs.InvalidParameter(errors.New("bad reference")) } ci, ok := c.idCache[digested.Digest()] if !ok { - return nil, errdefs.NotFound(errors.New("id not found")) + return imageLink{}, errdefs.NotFound(errors.New("id not found")) } - return ci, nil + return imageLink{ + cached: ci, + }, nil } img, err := i.client.ImageService().Get(ctx, namedRef.String()) if err != nil { if !cerrdefs.IsNotFound(err) { - return nil, err + return imageLink{}, err } dgst, err := c.ids.Lookup(ref) if err != nil { - return nil, errdefs.NotFound(errors.New("reference not found")) + return imageLink{}, errdefs.NotFound(errors.New("reference not found")) } ci, ok := c.idCache[dgst] if !ok { 
- return nil, errdefs.NotFound(errors.New("id not found")) + return imageLink{}, errdefs.NotFound(errors.New("id not found")) } - return ci, nil + return imageLink{ + cached: ci, + }, nil } ci, ok := c.tCache[img.Target.Digest] if !ok { // TODO(containerd): Update cache and return - return nil, errdefs.NotFound(errors.New("id not found")) + return imageLink{}, errdefs.NotFound(errors.New("id not found")) + } + + return imageLink{ + name: namedRef, + target: &img.Target, + cached: ci, + }, nil +} + +func (i *ImageService) updateCache(ctx context.Context, img imageLink) error { + c, err := i.getCache(ctx) + if err != nil { + return err + } + + img.cached.m.Lock() + img.cached.addReference(img.name) + img.cached.m.Unlock() + + var parent *cachedImage + + c.m.Lock() + if _, ok := c.tCache[img.target.Digest]; !ok { + c.tCache[img.target.Digest] = img.cached + } + if _, ok := c.idCache[img.cached.config.Digest]; !ok { + c.idCache[img.cached.config.Digest] = img.cached + c.ids.Add(img.cached.config.Digest) + } + if img.cached.parent != "" { + parent = c.idCache[img.cached.parent] + } + c.m.Unlock() + + if parent != nil { + parent.m.Lock() + parent.addChild(img.cached.config.Digest) + parent.m.Unlock() } - return ci, nil + return nil } diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 0dc99f888ad6d..7123eddfd5811 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -70,7 +70,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, return nil, err } - imgID := img.id + imgID := img.config.Digest repoRefs := img.references using := func(c *container.Container) bool { @@ -285,8 +285,8 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, if !force { c |= conflictSoft } - if conflict := i.checkImageDeleteConflict(img.id, c); conflict != nil { - if quiet && (!i.imageIsDangling(img.id) || conflict.used) { + if conflict := 
i.checkImageDeleteConflict(img.config.Digest, c); conflict != nil { + if quiet && (!i.imageIsDangling(img.config.Digest) || conflict.used) { // Ignore conflicts UNLESS the image is "dangling" or not being used in // which case we want the user to know. return nil, nil @@ -306,13 +306,13 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, // NOTE(containerd): GC can do this in the future // TODO(containerd): Move this function locally, to track and release layers // Walk layers and remove reference - removedLayers, err := i.imageStore.Delete(image.ID(img.id)) + removedLayers, err := i.imageStore.Delete(image.ID(img.config.Digest)) if err != nil { return records, err } - i.LogImageEvent(img.id.String(), img.id.String(), "delete") - records = append(records, types.ImageDeleteResponseItem{Deleted: img.id.String()}) + i.LogImageEvent(img.config.Digest.String(), img.config.Digest.String(), "delete") + records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) for _, removedLayer := range removedLayers { records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) } diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 5a1214f5ded48..e10b720d4b16f 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -92,7 +92,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO: Add manifest digest ref } else { ci = &cachedImage{ - id: config.Digest, + config: config, references: []reference.Named{ref}, } c.idCache[config.Digest] = ci diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 29e185ebab4cd..ceb1ca6f95a65 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -5,24 +5,22 @@ import ( "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" + "github.com/docker/docker/errdefs" ocispec 
"github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). -func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { - // TODO(containerd): Lookup existing image descriptor - img, err := i.GetImage(imageName) +func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) { + img, err := i.getImageByRef(ctx, imageName) if err != nil { return "", err } - var target ocispec.Descriptor - if len(img.References) > 0 { - target = img.References[0] - } else { - target = img.Config + if img.target == nil { + // TODO(containerd): Choose a better target based on other references? + img.target = &img.cached.config } newTag, err := reference.ParseNormalizedNamed(repository) @@ -34,23 +32,42 @@ func (i *ImageService) TagImage(imageName, repository, tag string) (string, erro return "", err } } + img.name = newTag - err = i.TagImageWithReference(target, newTag) + err = i.tagImage(ctx, img) return reference.FamiliarString(newTag), err } // TagImageWithReference adds the given reference to the image ID provided. 
-func (i *ImageService) TagImageWithReference(target ocispec.Descriptor, newTag reference.Named) error { - img := images.Image{ - Name: newTag.String(), - Target: target, +func (i *ImageService) TagImageWithReference(ctx context.Context, target ocispec.Descriptor, newTag reference.Named) error { + c, err := i.getCache(ctx) + if err != nil { + return err + } + ci := c.byTarget(target.Digest) + if ci == nil { + return errdefs.NotFound(errors.New("target not found")) + } + + return i.tagImage(ctx, imageLink{ + name: newTag, + target: &target, + cached: ci, + }) +} + +func (i *ImageService) tagImage(ctx context.Context, img imageLink) error { + im := images.Image{ + Name: img.name.String(), + Target: *img.target, } is := i.client.ImageService() - _, err := is.Create(context.TODO(), img) + _, err := is.Create(ctx, im) if err != nil { return errors.Wrap(err, "failed to create image") } + // TODO(containerd): Set last updated for target - i.LogImageEvent(target.Digest.String(), reference.FamiliarString(newTag), "tag") - return nil + i.LogImageEvent(img.target.Digest.String(), reference.FamiliarString(img.name), "tag") + return i.updateCache(ctx, img) } diff --git a/daemon/images/images.go b/daemon/images/images.go index 3aa1db75aa2ff..dfbc6b01077bf 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -156,7 +156,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al continue } - m[ci.id] = append(m[ci.id], img) + m[ci.config.Digest] = append(m[ci.config.Digest], img) //var size int64 // TODO: this seems pretty dumb to do From 9c9d9c71645ca898e3d37e96de2bcd3ab9880f40 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 11 Dec 2018 11:32:51 -0800 Subject: [PATCH 11/73] Add image deletion Fix conflict and update removal to use cache and not stores. Layer removal is still missing. 
Signed-off-by: Derek McGowan --- daemon/images/image_delete.go | 137 ++++++++++++++++++++++------------ 1 file changed, 88 insertions(+), 49 deletions(-) diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 7123eddfd5811..f170883364540 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -7,6 +7,7 @@ import ( "time" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/container" @@ -70,16 +71,17 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, return nil, err } - imgID := img.config.Digest + imgID := img.config.Digest.String() repoRefs := img.references using := func(c *container.Container) bool { - return digest.Digest(c.ImageID) == imgID + return digest.Digest(c.ImageID) == img.config.Digest } - var deletedRefs []reference.Named var removedRepositoryRef bool - if !isImageIDPrefix(imgID.String(), imageRef) { + if !isImageIDPrefix(imgID, imageRef) { + var deletedRefs []reference.Named + // A repository reference was given and should be removed // first. We can only remove this reference if either force is // true, there are multiple repository references to this @@ -90,7 +92,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, // this image would remain "dangling" and since // we really want to avoid that the client must // explicitly force its removal. 
- err := errors.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + err := errors.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID)) return nil, errdefs.Conflict(err) } } @@ -101,7 +103,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, } deletedRefs = append(deletedRefs, parsedRef) - i.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.LogImageEvent(imgID, imgID, "untag") // If a tag reference was removed and the only remaining // references to the same repository are digest references, @@ -135,7 +137,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, if len(repoRefs)-len(deletedRefs) > 0 { // Remove all references in containerd // Do not wait for containerd's garbage collection - records, err := i.removeImageRefs(ctx, deletedRefs, false) + records, err := i.removeImageRefs(ctx, img, deletedRefs, false) if err != nil { return nil, errors.Wrap(err, "failed to delete refs") } @@ -143,25 +145,25 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, } removedRepositoryRef = true - } else { + } else if isSingleReference(repoRefs) { // If an ID reference was given AND there is at most one tag // reference to the image AND all references are within one // repository, then remove all references. - if isSingleReference(repoRefs) { - c := conflictHard - if !force { - c |= conflictSoft &^ conflictActiveReference - } - if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { - return nil, conflict - } - for _, repoRef := range repoRefs { - // TODO(containerd): can repoRef be name only here? 
- deletedRefs = append(deletedRefs, repoRef) - i.LogImageEvent(imgID.String(), imgID.String(), "untag") - } + c := conflictHard + deletedRefs := 0 + if force { + // Treated all references as deleted + deletedRefs = len(repoRefs) + } else { + // If not forced, fail on soft conflicts + c |= conflictSoft } + if conflict := i.checkImageDeleteConflict(img, c, deletedRefs); conflict != nil { + return nil, conflict + } + + i.LogImageEvent(imgID, imgID, "untag") } // TODO(containerd): Lock, perform deletion, @@ -218,9 +220,8 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool { // removeImageRefs removes a set of image references // if the sync flag is set then garbage collection is // is completed before returning -func (i *ImageService) removeImageRefs(ctx context.Context, refs []reference.Named, sync bool) ([]types.ImageDeleteResponseItem, error) { +func (i *ImageService) removeImageRefs(ctx context.Context, img *cachedImage, refs []reference.Named, sync bool) ([]types.ImageDeleteResponseItem, error) { records := []types.ImageDeleteResponseItem{} - // TODO(containerd): clear from cache, get cache from arguments is := i.client.ImageService() @@ -240,6 +241,39 @@ func (i *ImageService) removeImageRefs(ctx context.Context, refs []reference.Nam records = append(records, untaggedRecord) } + // TODO(containerd): clear from cache, get cache from arguments + img.m.Lock() + + // Note: refs is always sorted in same order as img.references + // since it must be created from img.references loop + var l, j int + for _, ref := range refs { + s := ref.String() + for j < len(img.references) && img.references[j].String() < s { + img.references[l] = img.references[j] + l++ + j++ + } + if j >= len(img.references) { + break + } + if img.references[j].String() == s { + // don't add + j++ + } + } + for j < len(img.references) { + img.references[l] = img.references[j] + l++ + j++ + } + // Shorten original + if l < len(img.references) { + img.references = img.references[:l] + } + + 
img.m.Unlock() + return records, nil } @@ -278,6 +312,7 @@ func (idc *imageDeleteConflict) Conflict() {} // the function will return nil immediately without deleting the image. func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, repoRefs []reference.Named, force, prune, quiet bool) ([]types.ImageDeleteResponseItem, error) { // TODO(containerd): lock deletion, make reference removal and checks transactional in the cache? + log.G(ctx).Debugf("%s: Delete image with all references: %v", img.config.Digest, repoRefs) // First, determine if this image has any conflicts. Ignore soft conflicts // if force is true. @@ -285,37 +320,44 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, if !force { c |= conflictSoft } - if conflict := i.checkImageDeleteConflict(img.config.Digest, c); conflict != nil { - if quiet && (!i.imageIsDangling(img.config.Digest) || conflict.used) { + if conflict := i.checkImageDeleteConflict(img, c, len(repoRefs)); conflict != nil { + if quiet && (!i.imageIsDangling(img) || conflict.used) { + // TODO:(containerd): Is this expecting a no-op in all cases, since now + // remove image refs happens afterwards + log.G(ctx).Debugf("%s: ignoring conflict: %#v", img.config.Digest, conflict) + // Ignore conflicts UNLESS the image is "dangling" or not being used in // which case we want the user to know. return nil, nil } + log.G(ctx).Debugf("%s: remove conflict %v", img.config.Digest, conflict) // There was a conflict and it's either a hard conflict OR we are not // forcing deletion on soft conflicts. return nil, conflict } + log.G(ctx).Debugf("%s: removing references", img.config.Digest) // Delete all repository tag/digest references to this image. 
- records, err := i.removeImageRefs(ctx, repoRefs, true) + records, err := i.removeImageRefs(ctx, img, repoRefs, true) if err != nil { return records, err } // NOTE(containerd): GC can do this in the future // TODO(containerd): Move this function locally, to track and release layers + // Release img.ownedLayers // Walk layers and remove reference - removedLayers, err := i.imageStore.Delete(image.ID(img.config.Digest)) - if err != nil { - return records, err - } + //removedLayers, err := i.imageStore.Delete(image.ID(img.config.Digest)) + //if err != nil { + // return records, err + //} - i.LogImageEvent(img.config.Digest.String(), img.config.Digest.String(), "delete") - records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) - for _, removedLayer := range removedLayers { - records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) - } + //i.LogImageEvent(img.config.Digest.String(), img.config.Digest.String(), "delete") + //records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) + //for _, removedLayer := range removedLayers { + // records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) + //} var parent *cachedImage if img.parent != "" { @@ -346,13 +388,13 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. -func (i *ImageService) checkImageDeleteConflict(imgID digest.Digest, mask conflictType) *imageDeleteConflict { +func (i *ImageService) checkImageDeleteConflict(img *cachedImage, mask conflictType, deletedRefs int) *imageDeleteConflict { // Check if the image has any descendant images. 
// TODO(containerd): No use of image store - if mask&conflictDependentChild != 0 && len(i.imageStore.Children(image.ID(imgID))) > 0 { + if mask&conflictDependentChild != 0 && len(img.children) > 0 { return &imageDeleteConflict{ hard: true, - imgID: image.ID(imgID), + imgID: image.ID(img.config.Digest), message: "image has dependent child images", } } @@ -360,11 +402,11 @@ func (i *ImageService) checkImageDeleteConflict(imgID digest.Digest, mask confli if mask&conflictRunningContainer != 0 { // Check if any running container is using the image. running := func(c *container.Container) bool { - return c.IsRunning() && digest.Digest(c.ImageID) == imgID + return c.IsRunning() && digest.Digest(c.ImageID) == img.config.Digest } if container := i.containers.First(running); container != nil { return &imageDeleteConflict{ - imgID: image.ID(imgID), + imgID: image.ID(img.config.Digest), hard: true, used: true, message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), @@ -373,10 +415,9 @@ func (i *ImageService) checkImageDeleteConflict(imgID digest.Digest, mask confli } // Check if any repository tags/digest reference this image. - // TODO(containerd): No use of reference store - if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID)) > 0 { + if mask&conflictActiveReference != 0 && len(img.references) > deletedRefs { return &imageDeleteConflict{ - imgID: image.ID(imgID), + imgID: image.ID(img.config.Digest), message: "image is referenced in multiple repositories", } } @@ -384,11 +425,11 @@ func (i *ImageService) checkImageDeleteConflict(imgID digest.Digest, mask confli if mask&conflictStoppedContainer != 0 { // Check if any stopped containers reference this image. 
stopped := func(c *container.Container) bool { - return !c.IsRunning() && digest.Digest(c.ImageID) == imgID + return !c.IsRunning() && digest.Digest(c.ImageID) == img.config.Digest } if container := i.containers.First(stopped); container != nil { return &imageDeleteConflict{ - imgID: image.ID(imgID), + imgID: image.ID(img.config.Digest), used: true, message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), } @@ -401,10 +442,8 @@ func (i *ImageService) checkImageDeleteConflict(imgID digest.Digest, mask confli // imageIsDangling returns whether the given image is "dangling" which means // that there are no repository references to the given image and it has no // child images. -func (i *ImageService) imageIsDangling(imgID digest.Digest) bool { - // TODO(containerd): No use of reference store - // TODO(containerd): No use of image store +func (i *ImageService) imageIsDangling(img *cachedImage) bool { // To find children, Docker keeps a cache of images along with parents, it // can also keep a backpointer to parents in memory - return !(len(i.referenceStore.References(imgID)) > 0 || len(i.imageStore.Children(image.ID(imgID))) > 0) + return !(len(img.references) > 0 || len(img.children) > 0) } From caad97211339597fbdb73a636570271cea715e0c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 17 Dec 2018 16:44:34 -0800 Subject: [PATCH 12/73] Add support for layers Unpack and delete layers Signed-off-by: Derek McGowan --- daemon/images/cache.go | 6 ++ daemon/images/image_delete.go | 31 +++++----- daemon/images/image_pull.go | 104 ++++++++++++++++++++++++++++++++++ 3 files changed, 127 insertions(+), 14 deletions(-) diff --git a/daemon/images/cache.go b/daemon/images/cache.go index dc7a19d25e68d..ad23cc9183e1d 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/builder" buildcache "github.com/docker/docker/image/cache" 
+ "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -25,6 +26,11 @@ type cachedImage struct { m sync.Mutex references []reference.Named children []digest.Digest + + // Layer held by Docker, this should get removed when + // moved to containerd snapshotters. The garbage + // collection in containerd is reasonable for cleanup. + layer layer.Layer } type cache struct { diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index f170883364540..b371ba5fd6b81 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -3,6 +3,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" "fmt" + "runtime" "strings" "time" @@ -344,20 +345,22 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, return records, err } - // NOTE(containerd): GC can do this in the future - // TODO(containerd): Move this function locally, to track and release layers - // Release img.ownedLayers - // Walk layers and remove reference - //removedLayers, err := i.imageStore.Delete(image.ID(img.config.Digest)) - //if err != nil { - // return records, err - //} - - //i.LogImageEvent(img.config.Digest.String(), img.config.Digest.String(), "delete") - //records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) - //for _, removedLayer := range removedLayers { - // records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) - //} + i.LogImageEvent(img.config.Digest.String(), img.config.Digest.String(), "delete") + records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) + + // TODO(containerd): Snapshot integration will obsolete this section, + // containerd's garbage collector can own the removal of the layer + if img.layer != nil { + // TODO(containerd): Use function to get layer 
store + removedLayers, err := i.layerStores[runtime.GOOS].Release(img.layer) + if err != nil { + return records, err + } + + for _, removedLayer := range removedLayers { + records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) + } + } var parent *cachedImage if img.parent != "" { diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index e10b720d4b16f..19afa9af270a5 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -3,16 +3,25 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" "io" + "runtime" "strings" "time" "github.com/containerd/containerd" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" + "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // PullImage initiates a pull operation. image is the repository name to pull, and @@ -79,6 +88,11 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return errors.Wrap(err, "failed to resolve configuration") } + l, err := i.unpack(ctx, img.Target()) + if err != nil { + return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) + } + // TODO: Unpack into layer store // TODO: only unpack image types (does containerd already do this?) 
@@ -88,12 +102,23 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference c.m.Lock() ci, ok := c.idCache[config.Digest] if ok { + ll := ci.layer + ci.layer = l + if ll != nil { + metadata, err := i.layerStores[runtime.GOOS].Release(ll) + if err != nil { + return errors.Wrap(err, "failed to release layer") + } + layer.LogReleaseMetadata(metadata) + } + ci.addReference(ref) // TODO: Add manifest digest ref } else { ci = &cachedImage{ config: config, references: []reference.Named{ref}, + layer: l, } c.idCache[config.Digest] = ci } @@ -111,3 +136,82 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } // TODO: Add shallow pull function which returns descriptor + +func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (layer.Layer, error) { + var ( + cs = i.client.ContentStore() + ) + + manifest, err := images.Manifest(ctx, cs, target, platforms.Default()) + if err != nil { + return nil, err + } + + diffIDs, err := images.RootFS(ctx, cs, manifest.Config) + if err != nil { + return nil, errors.Wrap(err, "failed to resolve rootfs") + } + if len(diffIDs) != len(manifest.Layers) { + return nil, errors.Errorf("mismatched image rootfs and manifest layers") + } + + var ( + chain = []digest.Digest{} + l layer.Layer + ) + for d := range diffIDs { + chain = append(chain, diffIDs[d]) + + nl, err := i.applyLayer(ctx, manifest.Layers[d], chain) + if err != nil { + return nil, errors.Wrapf(err, "failed to apply layer %d", d) + } + logrus.Debugf("Layer applied: %s (%s)", nl.DiffID(), diffIDs[d]) + + if l != nil { + metadata, err := i.layerStores[runtime.GOOS].Release(l) + if err != nil { + return nil, errors.Wrap(err, "failed to release layer") + } + layer.LogReleaseMetadata(metadata) + } + + // TODO(containerd): verify diff ID + + l = nl + } + return l, nil +} + +func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, layers []digest.Digest) (layer.Layer, error) { + var ( + cs 
= i.client.ContentStore() + ls = i.layerStores[runtime.GOOS] + ) + + l, err := ls.Get(layer.ChainID(identity.ChainID(layers))) + if err == nil { + return l, nil + } else if err != layer.ErrLayerDoesNotExist { + return nil, err + } + + ra, err := cs.ReaderAt(ctx, blob) + if err != nil { + return nil, err + } + defer ra.Close() + + dc, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + return nil, err + } + defer dc.Close() + + var parent digest.Digest + if len(layers) > 1 { + parent = identity.ChainID(layers[:len(layers)-1]) + } + + return ls.Register(dc, layer.ChainID(parent)) +} From 7ef9ca329db02ce048fbdc9aceaa00afa89f312f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 17 Jan 2019 18:44:06 -0800 Subject: [PATCH 13/73] Add support for inspect Signed-off-by: Derek McGowan --- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- api/types/types.go | 8 +- daemon/cluster/executor/backend.go | 2 +- daemon/cluster/executor/container/adapter.go | 2 +- daemon/images/cache.go | 2 +- daemon/images/image_inspect.go | 159 ++++++++++++++----- 7 files changed, 128 insertions(+), 49 deletions(-) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 526356bfecf6f..891137aa0cf7d 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -23,7 +23,7 @@ type imageBackend interface { ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) Images(ctx context.Context, imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) - LookupImage(name string) (*types.ImageInspect, error) + LookupImage(ctx context.Context, name string) (*types.ImageInspect, error) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) 
(*types.ImagesPruneReport, error) } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 87bfd4d84e81d..82d2b8a8f0afd 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -213,7 +213,7 @@ func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r } func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - imageInspect, err := s.backend.LookupImage(vars["name"]) + imageInspect, err := s.backend.LookupImage(ctx, vars["name"]) if err != nil { return err } diff --git a/api/types/types.go b/api/types/types.go index b13d9c4c7df66..438faead00d60 100644 --- a/api/types/types.go +++ b/api/types/types.go @@ -33,10 +33,10 @@ type ImageInspect struct { Parent string Comment string Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string + Container string `json:",omitempty"` + ContainerConfig *container.Config `json:",omitempty"` + DockerVersion string `json:",omitempty"` + Author string `json:",omitempty"` Config *container.Config Architecture string Os string diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go index 16c888404d773..7754b30b93fd0 100644 --- a/daemon/cluster/executor/backend.go +++ b/daemon/cluster/executor/backend.go @@ -71,5 +71,5 @@ type VolumeBackend interface { type ImageBackend interface { PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error // TODO: Provide interface to do shallow pull and get digest from Named and Auth - LookupImage(name string) (*types.ImageInspect, error) + LookupImage(ctx context.Context, name string) (*types.ImageInspect, error) } diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go index 
720b8447fc803..f357e15361342 100644 --- a/daemon/cluster/executor/container/adapter.go +++ b/daemon/cluster/executor/container/adapter.go @@ -74,7 +74,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { named, err := reference.ParseNormalizedNamed(spec.Image) if err == nil { if _, ok := named.(reference.Canonical); ok { - _, err := c.imageBackend.LookupImage(spec.Image) + _, err := c.imageBackend.LookupImage(ctx, spec.Image) if err == nil { return nil } diff --git a/daemon/images/cache.go b/daemon/images/cache.go index ad23cc9183e1d..7fe9a3703c4ca 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -37,7 +37,7 @@ type cache struct { m sync.RWMutex // idCache maps Docker identifiers idCache map[digest.Digest]*cachedImage - // dCache maps target digests to images + // tCache maps target digests to images tCache map[digest.Digest]*cachedImage ids *digestset.Set } diff --git a/daemon/images/image_inspect.go b/daemon/images/image_inspect.go index fd3bf3bf1b28c..78607f2e52c98 100644 --- a/daemon/images/image_inspect.go +++ b/daemon/images/image_inspect.go @@ -1,47 +1,76 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "encoding/json" + "runtime" "time" + "github.com/containerd/containerd/content" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/image" + containertype "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) // LookupImage looks up an image by name and returns it as an ImageInspect // structure. 
-func (i *ImageService) LookupImage(name string) (*types.ImageInspect, error) { - img, err := i.GetImage(name) +func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.ImageInspect, error) { + ci, err := i.getCachedRef(ctx, name) if err != nil { - return nil, errors.Wrapf(err, "no such image: %s", name) - } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, system.ErrNotSupportedOperatingSystem + return nil, err } - refs := i.referenceStore.References(img.ID().Digest()) + repoTags := []string{} repoDigests := []string{} - for _, ref := range refs { + for _, ref := range ci.references { switch ref.(type) { case reference.NamedTagged: repoTags = append(repoTags, reference.FamiliarString(ref)) + // TODO(containerd): these references may need to come from + // metadata used for cross repository push case reference.Canonical: repoDigests = append(repoDigests, reference.FamiliarString(ref)) } } + p, err := content.ReadBlob(ctx, i.client.ContentStore(), ci.config) + if err != nil { + return nil, errors.Wrap(err, "failed to read config") + } + + var img struct { + ocispec.Image + + // Overwrite config for custom Docker fields + Config imageConfig `json:"config,omitempty"` + + Comment string `json:"comment,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + // TODO: Overwrite this with a label from config + DockerVersion string `json:"docker_version,omitempty"` + } + + if err := json.Unmarshal(p, &img); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal config") + } + var size int64 var layerMetadata map[string]string - layerID := img.RootFS.ChainID() + layerID := identity.ChainID(img.RootFS.DiffIDs) if layerID != "" { - l, err := i.layerStores[img.OperatingSystem()].Get(layerID) + l, err := i.layerStores[runtime.GOOS].Get(layer.ChainID(layerID)) if err != nil { return nil, err } - defer 
layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) + defer layer.ReleaseAndLog(i.layerStores[runtime.GOOS], l) size, err = l.Size() if err != nil { return nil, err @@ -54,45 +83,45 @@ func (i *ImageService) LookupImage(name string) (*types.ImageInspect, error) { } comment := img.Comment - if len(comment) == 0 && len(img.History) > 0 { + if img.Comment == "" && len(img.History) > 0 { comment = img.History[len(img.History)-1].Comment } - lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) - if err != nil { - return nil, err - } + // TODO(containerd): Get from label? + //lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) + //if err != nil { + // return nil, err + //} imageInspect := &types.ImageInspect{ - ID: img.ID().String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: img.Parent.String(), - Comment: comment, - Created: img.Created.Format(time.RFC3339Nano), - Container: img.Container, - ContainerConfig: &img.ContainerConfig, - DockerVersion: img.DockerVersion, - Author: img.Author, - Config: img.V1Image.Config, - Architecture: img.Architecture, - Os: img.OperatingSystem(), - OsVersion: img.OSVersion, - Size: size, - VirtualSize: size, // TODO: field unused, deprecate - RootFS: rootFSToAPIType(img.RootFS), - Metadata: types.ImageMetadata{ - LastTagTime: lastUpdated, - }, + ID: ci.config.Digest.String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: ci.parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: configToApiType(img.Config), + Architecture: img.Architecture, + Os: img.OS, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + // TODO(containerd): Get from labels? 
+ //Metadata: types.ImageMetadata{ + // LastTagTime: lastUpdated, + //}, } - imageInspect.GraphDriver.Name = i.layerStores[img.OperatingSystem()].DriverName() + imageInspect.GraphDriver.Name = i.layerStores[runtime.GOOS].DriverName() imageInspect.GraphDriver.Data = layerMetadata return imageInspect, nil } -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { +func rootFSToAPIType(rootfs ocispec.RootFS) types.RootFS { var layers []string for _, l := range rootfs.DiffIDs { layers = append(layers, l.String()) @@ -102,3 +131,53 @@ func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { Layers: layers, } } + +func configToApiType(c imageConfig) *containertype.Config { + return &containertype.Config{ + User: c.User, + ExposedPorts: portSetToApiType(c.ExposedPorts), + Env: c.Env, + WorkingDir: c.WorkingDir, + Labels: c.Labels, + StopSignal: c.StopSignal, + Volumes: c.Volumes, + Entrypoint: strslice.StrSlice(c.Entrypoint), + Cmd: strslice.StrSlice(c.Cmd), + + // From custom Docker type (aligned with what builder sets) + Healthcheck: c.Healthcheck, + ArgsEscaped: c.ArgsEscaped, + OnBuild: c.OnBuild, + StopTimeout: c.StopTimeout, + Shell: c.Shell, + } +} + +func portSetToApiType(ports map[string]struct{}) nat.PortSet { + ps := nat.PortSet{} + for p := range ports { + ps[nat.Port(p)] = struct{}{} + } + return ps +} + +// imageConfig is a docker compatible config for an image +type imageConfig struct { + ocispec.ImageConfig + + // Healthcheck defines healthchecks for the image + // uses api type which matches what is set by the builder + Healthcheck *containertype.HealthConfig `json:",omitempty"` + + // ArgsEscaped is true if command is already escaped (Windows specific) + ArgsEscaped bool `json:",omitempty"` + + // OnBuild is ONBUILD metadata that were defined on the image Dockerfile + OnBuild []string + + // StopTimeout (in seconds) to stop a container + StopTimeout *int `json:",omitempty"` + + // Shell for shell-form of RUN, CMD, ENTRYPOINT + Shell strslice.StrSlice 
`json:",omitempty"` +} From 2b93cc8a6122dfa2d602260ee9e4db5cfd68255c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 23 Jan 2019 10:55:23 -0800 Subject: [PATCH 14/73] Update image history to use containerd Signed-off-by: Derek McGowan --- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- daemon/images/image.go | 1 + daemon/images/image_builder.go | 2 + daemon/images/image_commit.go | 1 + daemon/images/image_events.go | 1 + daemon/images/image_history.go | 50 +++++++++++++++++-------- 7 files changed, 41 insertions(+), 18 deletions(-) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 891137aa0cf7d..c5a6c848c5938 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -21,7 +21,7 @@ type Backend interface { type imageBackend interface { ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) - ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) + ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error) Images(ctx context.Context, imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) LookupImage(ctx context.Context, name string) (*types.ImageInspect, error) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 82d2b8a8f0afd..40730884f0089 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -247,7 +247,7 @@ func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { name := vars["name"] - history, err := s.backend.ImageHistory(name) + history, err := s.backend.ImageHistory(ctx, name) if 
err != nil { return err } diff --git a/daemon/images/image.go b/daemon/images/image.go index 64915784b021b..d277cd667ab1a 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -101,6 +101,7 @@ func (i *ImageService) GetImage(refOrID string) (*image.Image, error) { return img, nil } +// TODO(containerd): remove or replace this function to return local type func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) (*image.Image, error) { cs := i.client.ContentStore() diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index cdf951c6f5649..e160c72419fda 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -139,6 +139,7 @@ func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLay } // TODO: could this use the regular daemon PullImage ? +// TODO(containerd): don't return *image.Image type func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform *specs.Platform) (*image.Image, error) { ref, err := reference.ParseNormalizedNamed(name) if err != nil { @@ -210,6 +211,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s // This is similar to LoadImage() except that it receives JSON encoded bytes of // an image instead of a tar archive. 
func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) { + // TODO(containerd): use containerd's image store id, err := i.imageStore.Create(config) if err != nil { return nil, errors.Wrapf(err, "failed to create image") diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 4caba9f27b761..335b837e9a0d6 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -45,6 +45,7 @@ func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { } defer layer.ReleaseAndLog(layerStore, l) + // TODO(containerd): put in containerd's image store cc := image.ChildConfig{ ContainerID: c.ContainerID, Author: c.Author, diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index df2268d597aa1..5fd3911acf4ec 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -11,6 +11,7 @@ func (i *ImageService) LogImageEvent(imageID, refName, action string) { // LogImageEventWithAttributes generates an event related to an image with specific given attributes. func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + // TODO(containerd): use i.getCachedRef(imageID) img, err := i.GetImage(imageID) if err == nil && img.V1Image.Config != nil { // image has not been removed yet. 
diff --git a/daemon/images/image_history.go b/daemon/images/image_history.go index b4ca25b1b652b..48bb501602abe 100644 --- a/daemon/images/image_history.go +++ b/daemon/images/image_history.go @@ -1,28 +1,44 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "encoding/json" "fmt" + "runtime" "time" + "github.com/containerd/containerd/content" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/image" "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // ImageHistory returns a slice of ImageHistory structures for the specified image // name by walking the image lineage. -func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { +func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image.HistoryResponseItem, error) { start := time.Now() - img, err := i.GetImage(name) + ci, err := i.getCachedRef(ctx, name) if err != nil { return nil, err } + p, err := content.ReadBlob(ctx, i.client.ContentStore(), ci.config) + if err != nil { + return nil, errors.Wrap(err, "failed to read config") + } + + var img ocispec.Image + if err := json.Unmarshal(p, &img); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal config") + } + history := []*image.HistoryResponseItem{} layerCounter := 0 - rootFS := *img.RootFS + rootFS := img.RootFS rootFS.DiffIDs = nil for _, h := range img.History { @@ -32,16 +48,13 @@ func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, if len(img.RootFS.DiffIDs) <= layerCounter { return nil, fmt.Errorf("too many non-empty layers in History section") } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, system.ErrNotSupportedOperatingSystem - } - rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := 
i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID()) + rootFS.DiffIDs = append(rootFS.DiffIDs, img.RootFS.DiffIDs[layerCounter]) + l, err := i.layerStores[runtime.GOOS].Get(layer.ChainID(identity.ChainID(rootFS.DiffIDs))) if err != nil { return nil, err } layerSize, err = l.DiffSize() - layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) + layer.ReleaseAndLog(i.layerStores[runtime.GOOS], l) if err != nil { return nil, err } @@ -58,14 +71,19 @@ func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, }}, history...) } + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } + // Fill in image IDs and tags - histImg := img - id := img.ID() + histImg := ci + id := ci.config.Digest for _, h := range history { h.ID = id.String() var tags []string - for _, r := range i.referenceStore.References(id.Digest()) { + for _, r := range histImg.references { if _, ok := r.(reference.NamedTagged); ok { tags = append(tags, reference.FamiliarString(r)) } @@ -73,12 +91,12 @@ func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, h.Tags = tags - id = histImg.Parent + id = histImg.parent if id == "" { break } - histImg, err = i.GetImage(id.String()) - if err != nil { + histImg = c.byID(id) + if histImg == nil { break } } From 7a063788f23e92010f9ff03685823425f5f97e6c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 28 Jan 2019 19:05:29 -0800 Subject: [PATCH 15/73] Add support for docker run Signed-off-by: Derek McGowan --- daemon/container.go | 5 +- daemon/create.go | 137 +++++++++++++++++++-------------- daemon/images/cache.go | 2 +- daemon/images/image.go | 12 ++- daemon/images/image_builder.go | 4 +- daemon/images/image_events.go | 2 +- daemon/images/image_pull.go | 2 +- daemon/images/images.go | 4 +- daemon/images/service.go | 74 +++++++++++++++--- daemon/list.go | 19 ++--- 10 files changed, 175 insertions(+), 86 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 
a82d60c26850a..526e029731d58 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/docker/go-connections/nat" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -127,7 +128,7 @@ func (daemon *Daemon) Register(c *container.Container) error { return c.CheckpointTo(daemon.containersReplica) } -func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { +func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, img ocispec.Descriptor, managed bool) (*container.Container, error) { var ( id string err error @@ -157,7 +158,7 @@ func (daemon *Daemon) newContainer(name string, operatingSystem string, config * base.Args = args //FIXME: de-duplicate from config base.Config = config base.HostConfig = &containertypes.HostConfig{} - base.ImageID = imgID + base.ImageID = image.ID(img.Digest) base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name base.Driver = daemon.imageService.GraphDriverForOS(operatingSystem) diff --git a/daemon/create.go b/daemon/create.go index bff1b12c9c1a2..f745b179014d3 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -1,21 +1,24 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" + "encoding/json" "fmt" "net" "runtime" "strings" "time" + "github.com/containerd/containerd/content" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/container" + 
"github.com/docker/docker/daemon/images" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -29,7 +32,7 @@ type createOpts struct { // CreateManagedContainer creates a container that is managed by a Service func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(createOpts{ + return daemon.containerCreate(context.TODO(), createOpts{ params: params, managed: true, ignoreImagesArgsEscaped: false}) @@ -37,7 +40,7 @@ func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) // ContainerCreate creates a regular container func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(createOpts{ + return daemon.containerCreate(context.TODO(), createOpts{ params: params, managed: false, ignoreImagesArgsEscaped: false}) @@ -46,31 +49,33 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (conta // ContainerCreateIgnoreImagesArgsEscaped creates a regular container. 
This is called from the builder RUN case // and ensures that we do not take the images ArgsEscaped func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(createOpts{ + return daemon.containerCreate(context.TODO(), createOpts{ params: params, managed: false, ignoreImagesArgsEscaped: true}) } -func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.ContainerCreateCreatedBody, error) { +func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (containertypes.ContainerCreateCreatedBody, error) { start := time.Now() if opts.params.Config == nil { return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) } os := runtime.GOOS - if opts.params.Config.Image != "" { - img, err := daemon.imageService.GetImage(opts.params.Config.Image) - if err == nil { - os = img.OS - } - } else { - // This mean scratch. On Windows, we can safely assume that this is a linux - // container. On other platforms, it's the host OS (which it already is) - if runtime.GOOS == "windows" && system.LCOWSupported() { - os = "linux" - } - } + // TODO(containerd): Resolve os for LCOW + // TODO(containerd): Why is this lookup done twice just for LCOW?? + //if opts.params.Config.Image != "" { + // _, img, err := daemon.imageService.GetImage(context.TODO(), params.Config.Image) + // if err == nil { + // os = img.OS + // } + //} else { + // // This mean scratch. On Windows, we can safely assume that this is a linux + // // container. 
On other platforms, it's the host OS (which it already is) + // if runtime.GOOS == "windows" && system.LCOWSupported() { + // os = "linux" + // } + //} warnings, err := daemon.verifyContainerSettings(os, opts.params.HostConfig, opts.params.Config, false) if err != nil { @@ -90,7 +95,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) } - container, err := daemon.create(opts) + container, err := daemon.create(ctx, opts) if err != nil { return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err } @@ -104,48 +109,21 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container } // Create creates a new container from the given configuration with a given name. -func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr error) { +func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *container.Container, retErr error) { var ( container *container.Container - img *image.Image - imgID image.ID + desc ocispec.Descriptor err error ) - os := runtime.GOOS if opts.params.Config.Image != "" { - img, err = daemon.imageService.GetImage(opts.params.Config.Image) + desc, err = daemon.imageService.GetImage(ctx, opts.params.Config.Image) if err != nil { return nil, err } - if img.OS != "" { - os = img.OS - } else { - // default to the host OS except on Windows with LCOW - if runtime.GOOS == "windows" && system.LCOWSupported() { - os = "linux" - } - } - imgID = img.ID() - - if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { - return nil, errors.New("operating system on which parent image was created is not Windows") - } - } else { - if runtime.GOOS == "windows" { - os = "linux" // 'scratch' case. 
- } } - // On WCOW, if are not being invoked by the builder to create this container (where - // ignoreImagesArgEscaped will be true) - if the image already has its arguments escaped, - // ensure that this is replicated across to the created container to avoid double-escaping - // of the arguments/command line when the runtime attempts to run the container. - if os == "windows" && !opts.ignoreImagesArgsEscaped && img != nil && img.RunConfig().ArgsEscaped { - opts.params.Config.ArgsEscaped = true - } - - if err := daemon.mergeAndVerifyConfig(opts.params.Config, img); err != nil { + if err := daemon.mergeAndVerifyConfig(ctx, opts.params.Config, desc); err != nil { return nil, errdefs.InvalidParameter(err) } @@ -153,7 +131,33 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr return nil, errdefs.InvalidParameter(err) } - if container, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil { + os := runtime.GOOS + if os == "windows" { + if desc.Digest != "" { + // TODO(containerd): resolve os for LCOW on Windows + // TODO(containerd): ensure platform in descriptor? + // TODO(containerd): Read blob + // TODO(containerd): Unmarshal OS + + //if img.OS != "" { + // os = img.OS + //} else { + // // default to the host OS except on Windows with LCOW + // if runtime.GOOS == "windows" && system.LCOWSupported() { + // os = "linux" + // } + //} + //imgID = desc.Digest + + //if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { + // return nil, errors.New("operating system on which parent image was created is not Windows") + //} + } else { + os = "linux" // 'scratch' case. 
+ } + } + + if container, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, desc, opts.managed); err != nil { return nil, err } defer func() { @@ -188,7 +192,13 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr } // Set RWLayer for container after mount labels have been set - rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMapping)) + createOpts := []images.CreateLayerOpt{ + images.WithLayerImage(desc), + images.WithLayerContainer(container), + images.WithLayerInit(setupInitLayer(daemon.idMapping)), + } + + rwLayer, err := daemon.imageService.CreateLayer(ctx, createOpts...) if err != nil { return nil, errdefs.System(err) } @@ -293,10 +303,25 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) return nil, nil } -func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { - if img != nil && img.V1Image.Config != nil { - if err := merge(config, img.V1Image.Config); err != nil { - return err +func (daemon *Daemon) mergeAndVerifyConfig(ctx context.Context, config *containertypes.Config, img ocispec.Descriptor) error { + if img.Digest != "" { + p, err := content.ReadBlob(ctx, daemon.containerdCli.ContentStore(), img) + if err != nil { + return errors.Wrap(err, "failed to read config") + } + + // Only parse out the config key + var imgConfig struct { + Config *containertypes.Config `json:"config,omitempty"` + } + if err := json.Unmarshal(p, &imgConfig); err != nil { + return errors.Wrap(err, "failed to parse image config") + } + + if imgConfig.Config != nil { + if err := merge(config, imgConfig.Config); err != nil { + return err + } } } // Reset the Entrypoint if it is [""] diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 7fe9a3703c4ca..f535ce5be0a4e 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -240,7 +240,7 @@ func (i *ImageService) 
MakeImageCache(sourceRefs []string) builder.ImageCache { cache := buildcache.New(i.imageStore) for _, ref := range sourceRefs { - img, err := i.GetImage(ref) + img, err := i.getDockerImage(ref) if err != nil { logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) continue diff --git a/daemon/images/image.go b/daemon/images/image.go index d277cd667ab1a..d271839b6c642 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -38,8 +38,18 @@ func (e ErrImageDoesNotExist) Error() string { // NotFound implements the NotFound interface func (e ErrImageDoesNotExist) NotFound() {} +func (i *ImageService) GetImage(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { + ci, err := i.getCachedRef(ctx, refOrID) + if err != nil { + return ocispec.Descriptor{}, err + } + + return ci.config, nil +} + // GetImage returns an image corresponding to the image referred to by refOrID. -func (i *ImageService) GetImage(refOrID string) (*image.Image, error) { +// Deprecated: Use (i *ImageService).GetImage instead. +func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { ref, err := reference.ParseAnyReference(refOrID) if err != nil { return nil, errdefs.InvalidParameter(err) diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index e160c72419fda..9df0d898dee42 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -162,7 +162,7 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf if err := i.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil { return nil, err } - return i.GetImage(name) + return i.getDockerImage(name) } // GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. 
@@ -182,7 +182,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s } if opts.PullOption != backend.PullOptionForcePull { - image, err := i.GetImage(refOrID) + image, err := i.getDockerImage(refOrID) if err != nil && opts.PullOption == backend.PullOptionNoPull { return nil, nil, err } diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index 5fd3911acf4ec..b2c0dad788bc6 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -12,7 +12,7 @@ func (i *ImageService) LogImageEvent(imageID, refName, action string) { // LogImageEventWithAttributes generates an event related to an image with specific given attributes. func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { // TODO(containerd): use i.getCachedRef(imageID) - img, err := i.GetImage(imageID) + img, err := i.getDockerImage(imageID) if err == nil && img.V1Image.Config != nil { // image has not been removed yet. // it could be missing if the event is `delete`. diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 19afa9af270a5..9e8c52abf3b1c 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -78,7 +78,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // - Auth config // - Custom headers // TODO: Platforms using `platform` - // TODO: progress tracking + // TODO(containerd): progress tracking // TODO: unpack tracking, use download manager for now? img, err := i.client.Pull(ctx, ref.String(), opts...) 
diff --git a/daemon/images/images.go b/daemon/images/images.go index dfbc6b01077bf..06c4f96bb6cd7 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -70,7 +70,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al var beforeFilter, sinceFilter *image.Image err = imageFilters.WalkValues("before", func(value string) error { var err error - beforeFilter, err = i.GetImage(value) + beforeFilter, err = i.getDockerImage(value) return err }) if err != nil { @@ -79,7 +79,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al err = imageFilters.WalkValues("since", func(value string) error { var err error - sinceFilter, err = i.GetImage(value) + sinceFilter, err = i.getDockerImage(value) return err }) if err != nil { diff --git a/daemon/images/service.go b/daemon/images/service.go index aed14312578a6..9e03f5769e21b 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -7,6 +7,7 @@ import ( "sync" "github.com/containerd/containerd" + "github.com/containerd/containerd/images" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" "github.com/docker/docker/distribution" @@ -17,6 +18,8 @@ import ( dockerreference "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -120,28 +123,77 @@ func (i *ImageService) Children(id image.ID) []image.ID { return i.imageStore.Children(id) } +type createLayerOptions struct { + id string + image ocispec.Descriptor + container *container.Container + initFunc layer.MountInit +} + +type CreateLayerOpt func(*createLayerOptions) + +func WithLayerID(id string) CreateLayerOpt { + return func(o *createLayerOptions) { + o.id = id + } +} + +func WithLayerContainer(container *container.Container) 
CreateLayerOpt { + return func(o *createLayerOptions) { + o.container = container + } +} + +func WithLayerImage(config ocispec.Descriptor) CreateLayerOpt { + return func(o *createLayerOptions) { + o.image = config + } +} + +func WithLayerInit(initFunc layer.MountInit) CreateLayerOpt { + return func(o *createLayerOptions) { + o.initFunc = initFunc + } +} + // CreateLayer creates a filesystem layer for a container. // called from create.go -// TODO: accept an opt struct instead of container? -func (i *ImageService) CreateLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) { - var layerID layer.ChainID - if container.ImageID != "" { - img, err := i.imageStore.Get(container.ImageID) +func (i *ImageService) CreateLayer(ctx context.Context, opts ...CreateLayerOpt) (layer.RWLayer, error) { + var options createLayerOptions + for _, opt := range opts { + opt(&options) + } + + var chainID digest.Digest + if options.image.Digest != "" { + diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), options.image) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to resolve rootfs") } - layerID = img.RootFS.ChainID() + + chainID = identity.ChainID(diffIDs) } rwLayerOpts := &layer.CreateRWLayerOpts{ - MountLabel: container.MountLabel, - InitFunc: initFunc, - StorageOpt: container.HostConfig.StorageOpt, + InitFunc: options.initFunc, + } + + if options.container != nil { + rwLayerOpts.MountLabel = options.container.MountLabel + rwLayerOpts.StorageOpt = options.container.HostConfig.StorageOpt + if options.id == "" { + options.id = options.container.ID + } + } + + if options.id == "" { + return nil, errors.New("no layer id provided") } // Indexing by OS is safe here as validation of OS has already been performed in create() (the only // caller), and guaranteed non-nil - return i.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts) + // TODO(containerd): resolve through descriptor + return 
i.layerStores[runtime.GOOS].CreateRWLayer(options.id, layer.ChainID(chainID), rwLayerOpts) } // GetLayerByID returns a layer by ID and operating system diff --git a/daemon/list.go b/daemon/list.go index 69fb69a90c65f..e48ccae9bd01f 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "sort" "strconv" @@ -317,17 +318,17 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis if psFilters.Contains("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - img, err := daemon.imageService.GetImage(ancestor) + img, err := daemon.imageService.GetImage(context.TODO(), ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil } - if imagesFilter[img.ID()] { + if imagesFilter[image.ID(img.Digest)] { // Already seen this ancestor, skip it return nil } // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, img.ID(), daemon.imageService.Children) + populateImageFilterByParents(imagesFilter, image.ID(img.Digest), daemon.imageService.Children) return nil }) } @@ -583,18 +584,18 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite // refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { c := s.Container - image := s.Image // keep the original ref if still valid (hasn't changed) - if image != s.ImageID { - img, err := daemon.imageService.GetImage(image) + updated := s.Image // keep the original ref if still valid (hasn't changed) + if updated != s.ImageID { + img, err := daemon.imageService.GetImage(context.TODO(), updated) if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { return nil, err } - if err 
!= nil || img.ImageID() != s.ImageID { + if err != nil || img.Digest.String() != s.ImageID { // ref changed, we need to use original ID - image = s.ImageID + updated = s.ImageID } } - c.Image = image + c.Image = updated return &c, nil } From 93e4ff855fae773bd10eaf99e6361d463efe68f7 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 29 Jan 2019 18:05:55 -0800 Subject: [PATCH 16/73] Update image service functions to use containerd Updates info functions to query the containerd image cache. Updates related call stack to pass down context. Signed-off-by: Derek McGowan --- api/server/router/container/backend.go | 4 +- .../router/container/container_routes.go | 4 +- api/server/router/swarm/backend.go | 2 +- api/server/router/swarm/cluster_routes.go | 2 +- api/server/router/system/backend.go | 2 +- api/server/router/system/system_routes.go | 2 +- builder/builder.go | 2 +- builder/dockerfile/containerbackend.go | 4 +- builder/dockerfile/internals.go | 2 +- builder/dockerfile/mockbackend_test.go | 2 +- cmd/dockerd/daemon.go | 4 +- cmd/dockerd/daemon_unix.go | 3 +- daemon/cluster/executor/backend.go | 6 +- daemon/cluster/executor/container/adapter.go | 2 +- daemon/cluster/executor/container/executor.go | 2 +- daemon/cluster/swarm.go | 8 +- daemon/create.go | 12 +-- daemon/daemon.go | 8 +- daemon/disk_usage.go | 2 +- daemon/events.go | 4 +- daemon/images/service.go | 82 ++++++++++++++----- daemon/info.go | 9 +- daemon/list.go | 44 +++++----- daemon/list_test.go | 11 +-- daemon/reload.go | 5 +- 25 files changed, 142 insertions(+), 86 deletions(-) diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go index e5a9b48eec92b..36e5ac76c4013 100644 --- a/api/server/router/container/backend.go +++ b/api/server/router/container/backend.go @@ -32,7 +32,7 @@ type copyBackend interface { // stateBackend includes functions to implement to provide container state lifecycle functionality. 
type stateBackend interface { - ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerCreate(ctx context.Context, config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) ContainerKill(name string, sig uint64) error ContainerPause(name string) error ContainerRename(oldName, newName string) error @@ -54,7 +54,7 @@ type monitorBackend interface { ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) - Containers(config *types.ContainerListOptions) ([]*types.Container, error) + Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error) } // attachBackend includes function to implement to provide container attaching functionality. diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go index 720d4be10142a..a77a07d1af383 100644 --- a/api/server/router/container/container_routes.go +++ b/api/server/router/container/container_routes.go @@ -88,7 +88,7 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response config.Limit = limit } - containers, err := s.backend.Containers(config) + containers, err := s.backend.Containers(ctx, config) if err != nil { return err } @@ -505,7 +505,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo hostConfig.PidsLimit = nil } - ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + ccr, err := s.backend.ContainerCreate(ctx, types.ContainerCreateConfig{ Name: name, Config: config, HostConfig: hostConfig, diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go index d0c7e60fb3270..fa63e363bdb14 100644 --- a/api/server/router/swarm/backend.go +++ b/api/server/router/swarm/backend.go @@ -12,7 +12,7 @@ import ( type Backend interface { Init(req 
types.InitRequest) (string, error) Join(req types.JoinRequest) error - Leave(force bool) error + Leave(ctx context.Context, force bool) error Inspect() (types.Swarm, error) Update(uint64, types.Spec, types.UpdateFlags) error GetUnlockKey() (string, error) diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go index ef4157bd8abd7..600b2d3664914 100644 --- a/api/server/router/swarm/cluster_routes.go +++ b/api/server/router/swarm/cluster_routes.go @@ -63,7 +63,7 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, } force := httputils.BoolValue(r, "force") - return sr.backend.Leave(force) + return sr.backend.Leave(ctx, force) } func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { diff --git a/api/server/router/system/backend.go b/api/server/router/system/backend.go index f5d2d9810137c..e91e9cc79b416 100644 --- a/api/server/router/system/backend.go +++ b/api/server/router/system/backend.go @@ -13,7 +13,7 @@ import ( // Backend is the methods that need to be implemented to provide // system specific functionality. 
type Backend interface { - SystemInfo() (*types.Info, error) + SystemInfo(context.Context) (*types.Info, error) SystemVersion() types.Version SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go index 7b455464fb761..553e314ee1e8f 100644 --- a/api/server/router/system/system_routes.go +++ b/api/server/router/system/system_routes.go @@ -44,7 +44,7 @@ func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info, err := s.backend.SystemInfo() + info, err := s.backend.SystemInfo(ctx) if err != nil { return err } diff --git a/builder/builder.go b/builder/builder.go index cf4d737e2baf7..dc3ef2062b525 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -61,7 +61,7 @@ type ExecBackend interface { // ContainerAttachRaw attaches to container. ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error // ContainerCreateIgnoreImagesArgsEscaped creates a new Docker container and returns potential warnings - ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) // ContainerRm removes a container specified by `id`. ContainerRm(name string, config *types.ContainerRmConfig) error // ContainerKill stops the container execution abruptly. 
diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go index 8b70c6289d3b2..b65c6222e570d 100644 --- a/builder/dockerfile/containerbackend.go +++ b/builder/dockerfile/containerbackend.go @@ -28,8 +28,8 @@ func newContainerManager(docker builder.ExecBackend) *containerManager { } // Create a container -func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { - container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(types.ContainerCreateConfig{ +func (c *containerManager) Create(ctx context.Context, runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, types.ContainerCreateConfig{ Config: runConfig, HostConfig: hostConfig, }) diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index fa35c4f3c06ff..7a90414bba4c6 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -443,7 +443,7 @@ func (b *Builder) create(runConfig *container.Config) (string, error) { isWCOW := runtime.GOOS == "windows" && b.platform != nil && b.platform.OS == "windows" hostConfig := hostConfigFromOptions(b.options, isWCOW) - container, err := b.containerManager.Create(runConfig, hostConfig) + container, err := b.containerManager.Create(b.clientCtx, runConfig, hostConfig) if err != nil { return "", err } diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go index d4526eafad870..ddda42a9034d5 100644 --- a/builder/dockerfile/mockbackend_test.go +++ b/builder/dockerfile/mockbackend_test.go @@ -28,7 +28,7 @@ func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout return nil } -func (m *MockBackend) ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { 
+func (m *MockBackend) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { if m.containerCreateFunc != nil { return m.containerCreateFunc(config) } diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index e2b3039095e2e..0e2bf6a1aca83 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -338,7 +338,7 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e }, nil } -func (cli *DaemonCli) reloadConfig() { +func (cli *DaemonCli) reloadConfig(ctx context.Context) { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins @@ -361,7 +361,7 @@ func (cli *DaemonCli) reloadConfig() { logrus.Warnf("Configured labels using reserved namespaces is deprecated: %s", err) } - if err := cli.d.Reload(c); err != nil { + if err := cli.d.Reload(ctx, c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go index a6685bb668769..d175cd632d108 100644 --- a/cmd/dockerd/daemon_unix.go +++ b/cmd/dockerd/daemon_unix.go @@ -81,8 +81,9 @@ func (cli *DaemonCli) setupConfigReloadTrap() { c := make(chan os.Signal, 1) signal.Notify(c, unix.SIGHUP) go func() { + ctx := context.Background() for range c { - cli.reloadConfig() + cli.reloadConfig(ctx) } }() } diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go index 7754b30b93fd0..353eaeddc9314 100644 --- a/daemon/cluster/executor/backend.go +++ b/daemon/cluster/executor/backend.go @@ -31,7 +31,7 @@ type Backend interface { FindNetwork(idName string) (libnetwork.Network, error) SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) ReleaseIngress() (<-chan struct{}, error) - CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + CreateManagedContainer(ctx context.Context, config 
types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error ContainerStop(name string, seconds *int) error ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) @@ -46,8 +46,8 @@ type Backend interface { SetContainerDependencyStore(name string, store exec.DependencyGetter) error SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error - SystemInfo() (*types.Info, error) - Containers(config *types.ContainerListOptions) ([]*types.Container, error) + SystemInfo(context.Context) (*types.Info, error) + Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error) SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error DaemonJoinsCluster(provider cluster.Provider) DaemonLeavesCluster() diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go index f357e15361342..a82222a3ecacd 100644 --- a/daemon/cluster/executor/container/adapter.go +++ b/daemon/cluster/executor/container/adapter.go @@ -280,7 +280,7 @@ func (c *containerAdapter) waitForDetach(ctx context.Context) error { func (c *containerAdapter) create(ctx context.Context) error { var cr containertypes.ContainerCreateCreatedBody var err error - if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + if cr, err = c.backend.CreateManagedContainer(ctx, types.ContainerCreateConfig{ Name: c.container.name(), Config: c.container.config(), HostConfig: c.container.hostConfig(), diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go index f54dc7b511c68..036fa627aa5a0 100644 --- a/daemon/cluster/executor/container/executor.go +++ 
b/daemon/cluster/executor/container/executor.go @@ -47,7 +47,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac // Describe returns the underlying node description from the docker client. func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { - info, err := e.backend.SystemInfo() + info, err := e.backend.SystemInfo(ctx) if err != nil { return nil, err } diff --git a/daemon/cluster/swarm.go b/daemon/cluster/swarm.go index 85dd6445a0307..d87fc571c132a 100644 --- a/daemon/cluster/swarm.go +++ b/daemon/cluster/swarm.go @@ -356,7 +356,7 @@ func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { } // Leave shuts down Cluster and removes current state. -func (c *Cluster) Leave(force bool) error { +func (c *Cluster) Leave(ctx context.Context, force bool) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() @@ -408,7 +408,7 @@ func (c *Cluster) Leave(force bool) error { c.mu.Unlock() if nodeID := state.NodeID(); nodeID != "" { - nodeContainers, err := c.listContainerForNode(nodeID) + nodeContainers, err := c.listContainerForNode(ctx, nodeID) if err != nil { return err } @@ -586,11 +586,11 @@ func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { return ctx.Err() } -func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { +func (c *Cluster) listContainerForNode(ctx context.Context, nodeID string) ([]string, error) { var ids []string filters := filters.NewArgs() filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) - containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + containers, err := c.config.Backend.Containers(ctx, &apitypes.ContainerListOptions{ Filters: filters, }) if err != nil { diff --git a/daemon/create.go b/daemon/create.go index f745b179014d3..fb321b070c191 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -31,16 +31,16 @@ type createOpts struct { } // CreateManagedContainer creates a container that is 
managed by a Service -func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(context.TODO(), createOpts{ +func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(ctx, createOpts{ params: params, managed: true, ignoreImagesArgsEscaped: false}) } // ContainerCreate creates a regular container -func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(context.TODO(), createOpts{ +func (daemon *Daemon) ContainerCreate(ctx context.Context, params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(ctx, createOpts{ params: params, managed: false, ignoreImagesArgsEscaped: false}) @@ -48,8 +48,8 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (conta // ContainerCreateIgnoreImagesArgsEscaped creates a regular container. 
This is called from the builder RUN case // and ensures that we do not take the images ArgsEscaped -func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(context.TODO(), createOpts{ +func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(ctx, createOpts{ params: params, managed: false, ignoreImagesArgsEscaped: true}) diff --git a/daemon/daemon.go b/daemon/daemon.go index c157700ebaa98..aa0435d2673d1 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1064,8 +1064,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } close(d.startupDone) - // FIXME: this method never returns an error - info, _ := d.SystemInfo() + info, err := d.SystemInfo(ctx) + if err != nil { + return nil, err + } engineInfo.WithValues( dockerversion.Version, @@ -1160,7 +1162,7 @@ func (daemon *Daemon) Shutdown() error { if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { // check if there are any running containers, if none we should do some cleanup - if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + if ls, err := daemon.Containers(context.TODO(), &types.ContainerListOptions{}); len(ls) != 0 || err != nil { // metrics plugins still need some cleanup daemon.cleanupMetricsPlugins() return nil diff --git a/daemon/disk_usage.go b/daemon/disk_usage.go index 0e1b6e1f4617d..504fd390cbd8d 100644 --- a/daemon/disk_usage.go +++ b/daemon/disk_usage.go @@ -17,7 +17,7 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) // Retrieve container list - allContainers, err := daemon.Containers(&types.ContainerListOptions{ + allContainers, err := 
daemon.Containers(ctx, &types.ContainerListOptions{ Size: true, All: true, }) diff --git a/daemon/events.go b/daemon/events.go index cf1634a198818..3ea64f6037290 100644 --- a/daemon/events.go +++ b/daemon/events.go @@ -85,9 +85,9 @@ func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, actio } // LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. -func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { +func (daemon *Daemon) LogDaemonEventWithAttributes(ctx context.Context, action string, attributes map[string]string) { if daemon.EventsService != nil { - if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + if info, err := daemon.SystemInfo(ctx); err == nil && info.Name != "" { attributes["name"] = info.Name } actor := events.Actor{ diff --git a/daemon/images/service.go b/daemon/images/service.go index 9e03f5769e21b..542ddb2d85974 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -13,6 +13,7 @@ import ( "github.com/docker/docker/distribution" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" dockerreference "github.com/docker/docker/reference" @@ -112,15 +113,35 @@ func (i *ImageService) DistributionServices() DistributionServices { // CountImages returns the number of images stored by ImageService // called from info.go -func (i *ImageService) CountImages() int { - return i.imageStore.Len() +func (i *ImageService) CountImages(ctx context.Context) (int, error) { + c, err := i.getCache(ctx) + if err != nil { + return 0, err + } + + c.m.RLock() + l := len(c.idCache) + c.m.RUnlock() + + return l, nil } -// Children returns the children image.IDs for a parent image. +// ChildrenByID returns the children image digests for a parent image. 
// called from list.go to filter containers -// TODO: refactor to expose an ancestry for image.ID? -func (i *ImageService) Children(id image.ID) []image.ID { - return i.imageStore.Children(id) +func (i *ImageService) ChildrenByID(ctx context.Context, id digest.Digest) ([]digest.Digest, error) { + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } + + c.m.RLock() + ci, ok := c.idCache[id] + c.m.RUnlock() + if !ok { + return nil, nil + } + + return ci.children, nil } type createLayerOptions struct { @@ -256,7 +277,10 @@ func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, containerOS string) e // called from disk_usage.go func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { var allLayersSize int64 - layerRefs := i.getLayerRefs() + layerRefs, err := i.getLayerRefs(ctx) + if err != nil { + return 0, err + } for _, ls := range i.layerStores { allLayers := ls.Map() for _, l := range allLayers { @@ -266,7 +290,7 @@ func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { default: size, err := l.DiffSize() if err == nil { - if _, ok := layerRefs[l.ChainID()]; ok { + if _, ok := layerRefs[digest.Digest(l.ChainID())]; ok { allLayersSize += size } } else { @@ -278,31 +302,47 @@ func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { return allLayersSize, nil } -func (i *ImageService) getLayerRefs() map[layer.ChainID]int { - tmpImages := i.imageStore.Map() - layerRefs := map[layer.ChainID]int{} - for id, img := range tmpImages { - dgst := digest.Digest(id) - if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { +func (i *ImageService) getLayerRefs(ctx context.Context) (map[digest.Digest]int, error) { + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } + + // Create copy and unlock cache + c.m.RLock() + imgs := make(map[digest.Digest]*cachedImage, len(c.idCache)) + for dgst, ci := range c.idCache { + imgs[dgst] = ci + } + c.m.RUnlock() + + 
layerRefs := map[digest.Digest]int{} + for _, img := range imgs { + if len(img.references) == 0 && len(img.children) != 0 { continue } - rootFS := *img.RootFS - rootFS.DiffIDs = nil - for _, id := range img.RootFS.DiffIDs { - rootFS.Append(id) - chid := rootFS.ChainID() - layerRefs[chid]++ + diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), img.config) + if err != nil { + if errdefs.IsNotFound(err) { + continue + } + return nil, errors.Wrap(err, "failed to resolve rootfs") + } + + for i := range diffIDs { + layerRefs[identity.ChainID(diffIDs[:i+1])]++ } } - return layerRefs + return layerRefs, nil } // UpdateConfig values // // called from reload.go func (i *ImageService) UpdateConfig(maxDownloads, maxUploads *int) { + // TODO(containerd): store these locally to configure resolver if i.downloadManager != nil && maxDownloads != nil { i.downloadManager.SetConcurrency(*maxDownloads) } diff --git a/daemon/info.go b/daemon/info.go index bfd8199edbc1a..8c9aebf81da19 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "net/url" "os" @@ -26,11 +27,15 @@ import ( ) // SystemInfo returns information about the host server the daemon is running on. 
-func (daemon *Daemon) SystemInfo() (*types.Info, error) { +func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) { defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))() sysInfo := sysinfo.New(true) cRunning, cPaused, cStopped := stateCtr.get() + count, err := daemon.imageService.CountImages(ctx) + if err != nil { + return nil, err + } v := &types.Info{ ID: daemon.ID, @@ -38,7 +43,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { ContainersRunning: cRunning, ContainersPaused: cPaused, ContainersStopped: cStopped, - Images: daemon.imageService.CountImages(), + Images: count, IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, diff --git a/daemon/list.go b/daemon/list.go index e48ccae9bd01f..d03edfa79faee 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -12,8 +12,8 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/images" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" "github.com/docker/go-connections/nat" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -70,7 +70,7 @@ type listContext struct { // names is a list of container names to filter with names map[string][]string // images is a list of images to filter with - images map[image.ID]bool + images map[digest.Digest]bool // filters is a collection of arguments to filter with, specified by the user filters filters.Args // exitAllowed is a list of exit codes allowed to filter with @@ -105,8 +105,8 @@ func (r byCreatedDescending) Less(i, j int) bool { } // Containers returns the list of containers to show given the user's filtering. 
-func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.refreshImage) +func (daemon *Daemon) Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(ctx, config, daemon.refreshImage) } func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) { @@ -176,7 +176,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex } // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. -func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { +func (daemon *Daemon) reduceContainers(ctx context.Context, config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { if err := config.Filters.Validate(acceptedPsFilterTags); err != nil { return nil, err } @@ -186,7 +186,7 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc containers = []*types.Container{} ) - ctx, err := daemon.foldFilter(view, config) + lctx, err := daemon.foldFilter(ctx, view, config) if err != nil { return nil, err } @@ -194,13 +194,13 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc // fastpath to only look at a subset of containers if specific name // or ID matches were provided by the user--otherwise we potentially // end up querying many more containers than intended - containerList, err := daemon.filterByNameIDMatches(view, ctx) + containerList, err := daemon.filterByNameIDMatches(view, lctx) if err != nil { return nil, err } for i := range containerList { - t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) + t, err := daemon.reducePsContainer(&containerList[i], lctx, reducer) if err != nil { if err != 
errStopIteration { return nil, err @@ -209,7 +209,7 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc } if t != nil { containers = append(containers, t) - ctx.idx++ + lctx.idx++ } } @@ -242,7 +242,7 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list } // foldFilter generates the container filter based on the user's filtering options. -func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) { +func (daemon *Daemon) foldFilter(ctx context.Context, view container.View, config *types.ContainerListOptions) (*listContext, error) { psFilters := config.Filters var filtExited []int @@ -313,23 +313,22 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis return nil, err } - imagesFilter := map[image.ID]bool{} + imagesFilter := map[digest.Digest]bool{} var ancestorFilter bool if psFilters.Contains("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - img, err := daemon.imageService.GetImage(context.TODO(), ancestor) + img, err := daemon.imageService.GetImage(ctx, ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil } - if imagesFilter[image.ID(img.Digest)] { + if imagesFilter[img.Digest] { // Already seen this ancestor, skip it return nil } // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, image.ID(img.Digest), daemon.imageService.Children) - return nil + return populateImageFilterByParents(ctx, imagesFilter, img.Digest, daemon.imageService.ChildrenByID) }) } @@ -520,7 +519,7 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite if len(ctx.images) == 0 { return excludeContainer } - if !ctx.images[image.ID(container.ImageID)] { + if !ctx.images[digest.Digest(container.ImageID)] { return excludeContainer } } @@ -599,11 +598,18 @@ func (daemon 
*Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*ty return &c, nil } -func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { +func populateImageFilterByParents(ctx context.Context, ancestorMap map[digest.Digest]bool, imageID digest.Digest, getChildren func(context.Context, digest.Digest) ([]digest.Digest, error)) error { if !ancestorMap[imageID] { - for _, id := range getChildren(imageID) { - populateImageFilterByParents(ancestorMap, id, getChildren) + children, err := getChildren(ctx, imageID) + if err != nil { + return err + } + for _, id := range children { + if err := populateImageFilterByParents(ctx, ancestorMap, id, getChildren); err != nil { + return err + } } ancestorMap[imageID] = true } + return nil } diff --git a/daemon/list_test.go b/daemon/list_test.go index c0a7290cd496d..84836d95058d3 100644 --- a/daemon/list_test.go +++ b/daemon/list_test.go @@ -1,6 +1,7 @@ package daemon import ( + "context" "io/ioutil" "os" "path/filepath" @@ -89,7 +90,7 @@ func TestListInvalidFilter(t *testing.T) { f := filters.NewArgs(filters.Arg("invalid", "foo")) - _, err = d.Containers(&types.ContainerListOptions{ + _, err = d.Containers(context.Background(), &types.ContainerListOptions{ Filters: f, }) assert.Assert(t, is.Error(err, "Invalid filter 'invalid'")) @@ -110,7 +111,7 @@ func TestNameFilter(t *testing.T) { // moby/moby #37453 - ^ regex not working due to prefix slash // not being stripped - containerList, err := d.Containers(&types.ContainerListOptions{ + containerList, err := d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "^a")), }) assert.NilError(t, err) @@ -119,7 +120,7 @@ func TestNameFilter(t *testing.T) { assert.Assert(t, containerListContainsName(containerList, two.Name)) // Same as above but with slash prefix should produce the same result - containerListWithPrefix, err := 
d.Containers(&types.ContainerListOptions{ + containerListWithPrefix, err := d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "^/a")), }) assert.NilError(t, err) @@ -128,7 +129,7 @@ func TestNameFilter(t *testing.T) { assert.Assert(t, containerListContainsName(containerListWithPrefix, two.Name)) // Same as above but make sure it works for exact names - containerList, err = d.Containers(&types.ContainerListOptions{ + containerList, err = d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "b1")), }) assert.NilError(t, err) @@ -136,7 +137,7 @@ func TestNameFilter(t *testing.T) { assert.Assert(t, containerListContainsName(containerList, three.Name)) // Same as above but with slash prefix should produce the same result - containerListWithPrefix, err = d.Containers(&types.ContainerListOptions{ + containerListWithPrefix, err = d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "/b1")), }) assert.NilError(t, err) diff --git a/daemon/reload.go b/daemon/reload.go index a31dd0cb87c17..07ccfff7efd82 100644 --- a/daemon/reload.go +++ b/daemon/reload.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "encoding/json" "fmt" @@ -22,7 +23,7 @@ import ( // - Insecure registries // - Registry mirrors // - Daemon live restore -func (daemon *Daemon) Reload(conf *config.Config) (err error) { +func (daemon *Daemon) Reload(ctx context.Context, conf *config.Config) (err error) { daemon.configStore.Lock() attributes := map[string]string{} @@ -35,7 +36,7 @@ func (daemon *Daemon) Reload(conf *config.Config) (err error) { daemon.configStore.Unlock() if err == nil { logrus.Infof("Reloaded configuration: %s", jsonString) - daemon.LogDaemonEventWithAttributes("reload", attributes) + daemon.LogDaemonEventWithAttributes(ctx, "reload", attributes) } }() From 
a48af865e93ca767c8a995a51cd3952003d8f065 Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Wed, 30 Jan 2019 16:09:04 -0800 Subject: [PATCH 17/73] display docker pull progress for containerd integration Signed-off-by: Anda Xu --- daemon/images/image_pull.go | 214 +++++++++++++++++++++++++++++++++--- 1 file changed, 196 insertions(+), 18 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 9e8c52abf3b1c..6cc0ccbf3a4d8 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -5,8 +5,11 @@ import ( "io" "runtime" "strings" + "sync" "time" + "github.com/docker/docker/pkg/stringid" + "github.com/containerd/containerd" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" @@ -16,6 +19,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -63,17 +68,26 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return err } - // Include a buffer so that slow client connections don't affect - // transfer performance. 
- //progressChan := make(chan progress.Progress, 100) - - //writesDone := make(chan struct{}) + ongoing := newJobs(ref.Name()) - //ctx, cancelFunc := context.WithCancel(ctx) + pctx, stopProgress := context.WithCancel(ctx) + progress := make(chan struct{}) + go func() { + // no progress bar, because it hides some debug logs + showProgress(pctx, ongoing, ref, i.client.ContentStore(), outStream) + close(progress) + }() // TODO: Lease - - opts := []containerd.RemoteOpt{} + h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.MediaType != images.MediaTypeDockerSchema1Manifest { + ongoing.add(desc) + } + return nil, nil + }) + opts := []containerd.RemoteOpt{ + containerd.WithImageHandler(h), + } // TODO: Custom resolver // - Auth config // - Custom headers @@ -81,14 +95,14 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO(containerd): progress tracking // TODO: unpack tracking, use download manager for now? - img, err := i.client.Pull(ctx, ref.String(), opts...) + img, err := i.client.Pull(pctx, ref.String(), opts...) 
- config, err := img.Config(ctx) + config, err := img.Config(pctx) if err != nil { return errors.Wrap(err, "failed to resolve configuration") } - l, err := i.unpack(ctx, img.Target()) + l, err := i.unpack(pctx, img.Target()) if err != nil { return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) } @@ -124,14 +138,9 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } c.tCache[img.Target().Digest] = ci c.m.Unlock() + stopProgress() + <-progress - //go func() { - // progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) - // close(writesDone) - //}() - - //close(progressChan) - //<-writesDone return err } @@ -215,3 +224,172 @@ func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, return ls.Register(dc, layer.ChainID(parent)) } +func getTagOrDigest(ref reference.Named) string { + var ( + // manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if digested, isDigested := ref.(reference.Canonical); isDigested { + tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tagOrDigest = tagged.Tag() + } + // todo: is it safe to assume it is always a tag or digest? 
+ return tagOrDigest +} + +const ( + downloading = "Downloading" + dlcomplete = "Download complete" + waiting = "Waiting" + exists = "Already exists" +) + +func showProgress(ctx context.Context, ongoing *jobs, ref reference.Named, cs content.Store, out io.Writer) { + progressOutput := streamformatter.NewJSONProgressOutput(out, false) + progressOutput.WriteProgress(progress.Progress{ID: getTagOrDigest(ref), Message: "Pulling from " + reference.Path(ref)}) + var ( + ticker = time.NewTicker(100 * time.Millisecond) + start = time.Now() + statuses = map[string]StatusInfo{} + done bool + ) + defer ticker.Stop() + +outer: + for { + select { + case <-ticker.C: + activeSeen := map[string]struct{}{} + if !done { + active, err := cs.ListStatuses(ctx, "") + if err != nil { + logrus.Error("active check failed") + continue + } + // update status of active entries! + for _, active := range active { + descID := stringid.TruncateID(active.Ref) + if !strings.Contains(active.Ref, "layer") { + continue + } + progressOutput.WriteProgress(progress.Progress{ID: descID, Action: downloading, Current: active.Offset, Total: active.Total, LastUpdate: false}) + statuses[descID] = StatusInfo{ + Status: downloading, // Downloading + } + activeSeen[descID] = struct{}{} + } + } + + // now, update the items in jobs that are not in active + for _, j := range ongoing.jobs() { + descID := stringid.TruncateID(j.Digest.String()) + if _, ok := activeSeen[descID]; ok { + continue + } + // skip displaying non-layer info + if !isLayer(j) { + continue + } + status, ok := statuses[descID] + if !done && (!ok || status.Status == downloading) { + info, err := cs.Info(ctx, j.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + logrus.Errorf("failed to get content info") + continue outer + } else { + progressOutput.WriteProgress(progress.Progress{ID: descID, Action: waiting}) + statuses[descID] = StatusInfo{ + Status: waiting, + } + } + } else if info.CreatedAt.After(start) { + 
progressOutput.WriteProgress(progress.Progress{ID: descID, Action: dlcomplete}) + statuses[descID] = StatusInfo{ + Status: dlcomplete, + } + } else { + progressOutput.WriteProgress(progress.Progress{ID: descID, Action: exists}) + statuses[descID] = StatusInfo{ + Status: exists, + } + } + } else if done { + progressOutput.WriteProgress(progress.Progress{ID: descID, Action: dlcomplete}) + if ok { + if status.Status != dlcomplete && status.Status != exists { + status.Status = dlcomplete + statuses[descID] = status + } + } else { + statuses[descID] = StatusInfo{ + Status: dlcomplete, + } + } + } + } + + if done { + return + } + case <-ctx.Done(): + done = true // allow ui to update once more + } + } +} + +// StatusInfo holds the status info for an upload or download +type StatusInfo struct { + Status string +} + +func isLayer(desc ocispec.Descriptor) bool { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, + ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: + return true + default: + return false + } +} + +// jobs provides a way of identifying the download keys for a particular task +// encountering during the pull walk. +// +// This is very minimal and will probably be replaced with something more +// featured. 
+type jobs struct { + name string + added map[digest.Digest]struct{} + descs []ocispec.Descriptor + mu sync.Mutex +} + +func newJobs(name string) *jobs { + return &jobs{ + name: name, + added: map[digest.Digest]struct{}{}, + } +} + +func (j *jobs) add(desc ocispec.Descriptor) { + j.mu.Lock() + defer j.mu.Unlock() + + if _, ok := j.added[desc.Digest]; ok { + return + } + j.descs = append(j.descs, desc) + j.added[desc.Digest] = struct{}{} +} + +func (j *jobs) jobs() []ocispec.Descriptor { + j.mu.Lock() + defer j.mu.Unlock() + + var descs []ocispec.Descriptor + return append(descs, j.descs...) +} From e4de561fa0bf01bbe2f07b63b9a812b2f60d906e Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 5 Feb 2019 16:14:47 -0800 Subject: [PATCH 18/73] Add support for commit Fix dangling image management to not rely on reference parsing Signed-off-by: Derek McGowan --- builder/builder.go | 2 +- builder/dockerfile/evaluator.go | 8 +- builder/dockerfile/internals.go | 2 +- builder/dockerfile/mockbackend_test.go | 2 +- daemon/commit.go | 8 +- daemon/images/cache.go | 49 ++++--- daemon/images/image.go | 4 + daemon/images/image_commit.go | 181 ++++++++++++++++++++----- daemon/images/images.go | 50 ++++--- 9 files changed, 225 insertions(+), 81 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index dc3ef2062b525..716b3fb4fb4fd 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -42,7 +42,7 @@ type Backend interface { // CommitBuildStep creates a new Docker image from the config generated by // a build step. 
- CommitBuildStep(backend.CommitConfig) (image.ID, error) + CommitBuildStep(context.Context, backend.CommitConfig) (image.ID, error) // ContainerCreateWorkdir creates the workdir ContainerCreateWorkdir(containerID string) error diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go index 02e14775280db..69aad74c53841 100644 --- a/builder/dockerfile/evaluator.go +++ b/builder/dockerfile/evaluator.go @@ -20,6 +20,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "context" "reflect" "runtime" "strconv" @@ -105,6 +106,7 @@ func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { // dispatchState is a data object which is modified by dispatchers type dispatchState struct { + ctx context.Context runConfig *container.Config maintainer string cmdSet bool @@ -115,10 +117,10 @@ type dispatchState struct { operatingSystem string } -func newDispatchState(baseArgs *BuildArgs) *dispatchState { +func newDispatchState(ctx context.Context, baseArgs *BuildArgs) *dispatchState { args := baseArgs.Clone() args.ResetAllowed() - return &dispatchState{runConfig: &container.Config{}, buildArgs: args} + return &dispatchState{ctx: ctx, runConfig: &container.Config{}, buildArgs: args} } type stagesBuildResults struct { @@ -195,7 +197,7 @@ type dispatchRequest struct { func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *BuildArgs, stages *stagesBuildResults) dispatchRequest { return dispatchRequest{ - state: newDispatchState(buildArgs), + state: newDispatchState(builder.clientCtx, buildArgs), shlex: shell.NewLex(escapeToken), builder: builder, source: source, diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 7a90414bba4c6..d7d04312b8d59 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -106,7 +106,7 @@ func (b *Builder) commitContainer(dispatchState *dispatchState, id string, conta ContainerID: id, } - 
imageID, err := b.docker.CommitBuildStep(commitCfg) + imageID, err := b.docker.CommitBuildStep(dispatchState.ctx, commitCfg) dispatchState.imageID = string(imageID) return err } diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go index ddda42a9034d5..969c24f214c58 100644 --- a/builder/dockerfile/mockbackend_test.go +++ b/builder/dockerfile/mockbackend_test.go @@ -39,7 +39,7 @@ func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) return nil } -func (m *MockBackend) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { +func (m *MockBackend) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { if m.commitFunc != nil { return m.commitFunc(c) } diff --git a/daemon/commit.go b/daemon/commit.go index a91e5cc7ba5b8..ba6950f27e458 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -155,7 +155,7 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, return "", err } - id, err := daemon.imageService.CommitImage(backend.CommitConfig{ + desc, err := daemon.imageService.CommitImage(ctx, backend.CommitConfig{ Author: c.Author, Comment: c.Comment, Config: newConfig, @@ -171,16 +171,16 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, var imageRef string if c.Repo != "" { - imageRef, err = daemon.imageService.TagImage(ctx, string(id), c.Repo, c.Tag) + imageRef, err = daemon.imageService.TagImage(ctx, string(desc.Digest), c.Repo, c.Tag) if err != nil { return "", err } } daemon.LogContainerEventWithAttributes(container, "commit", map[string]string{ "comment": c.Comment, - "imageID": id.String(), + "imageID": desc.Digest.String(), "imageRef": imageRef, }) containerActions.WithValues("commit").UpdateSince(start) - return id.String(), nil + return desc.Digest.String(), nil } diff --git a/daemon/images/cache.go b/daemon/images/cache.go index f535ce5be0a4e..55af904d242e1 100644 --- a/daemon/images/cache.go +++ 
b/daemon/images/cache.go @@ -95,39 +95,50 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach } for _, img := range imgs { - ref, err := reference.Parse(img.Name) - if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image name") - continue - } + var ( + named reference.Named + id ocispec.Descriptor + ) - named, hasName := ref.(reference.Named) + if danglingID, ok := img.Labels[LabelImageDangling]; !ok { + ref, err := reference.Parse(img.Name) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image name") + continue + } + var ok bool + named, ok = ref.(reference.Named) + if !ok { + log.G(ctx).WithField("name", img.Name).Debug("skipping invalid image name with no name component") + continue + } + } else { + dgst, err := digest.Parse(danglingID) + if err != nil { + log.G(ctx).WithError(err).WithField("id", danglingID).Debug("skipping invalid image id label (dangling)") + continue + } + id = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: dgst, + } + } ci := c.tCache[img.Target.Digest] if ci == nil { - var id ocispec.Descriptor - if !hasName { - digested, ok := ref.(reference.Digested) - if ok { - id = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: digested.Digest(), - } - } - } if img.Target.MediaType == images.MediaTypeDockerSchema2Config || img.Target.MediaType == ocispec.MediaTypeImageConfig { id = img.Target - } if id.Digest == "" { idstr, ok := img.Labels[LabelImageID] if !ok { cs := i.client.ContentStore() // TODO(containerd): resolve architecture from context + // TODO(containerd): support multi-platform images platform := platforms.Default() desc, err := images.Config(ctx, cs, img.Target, platform) if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("TODO: no label") + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("unable to resolve image config 
for platform") continue } id = desc @@ -163,7 +174,7 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach } c.tCache[img.Target.Digest] = ci } - if hasName { + if named != nil { ci.addReference(named) } } diff --git a/daemon/images/image.go b/daemon/images/image.go index d271839b6c642..e49a80b8e3e30 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -20,6 +20,10 @@ import ( const ( LabelImageID = "docker.io/image.id" LabelImageParent = "docker.io/image.parent" + + // LabelImageDangling refers to images with no name + // Stored on images and points to the image config digest + LabelImageDangling = "docker.io/image.dangling" ) // ErrImageDoesNotExist is error returned when no image can be found for a reference. diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 335b837e9a0d6..34e2886022623 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -1,26 +1,81 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "bytes" + "context" "encoding/json" + "fmt" "io" + "strings" + "time" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/system" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) // CommitImage creates a new image from a commit config -func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { +func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) (ocispec.Descriptor, error) { + cache, err := i.getCache(ctx) + 
if err != nil { + return ocispec.Descriptor{}, err + } + + var img struct { + ocispec.Image + + // Overwrite config for custom Docker fields + Container string `json:"container,omitempty"` + ContainerConfig container.Config `json:"container_config,omitempty"` + Config *container.Config `json:"config,omitempty"` + + Comment string `json:"comment,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + // TODO: Overwrite this with a label from config + DockerVersion string `json:"docker_version,omitempty"` + } + + if c.ParentImageID == "" { + img.RootFS.Type = "layers" + } else { + cache.m.RLock() + pci, ok := cache.idCache[digest.Digest(c.ParentImageID)] + cache.m.RUnlock() + + if !ok { + return ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "parent not found") + } + + b, err := content.ReadBlob(ctx, i.client.ContentStore(), pci.config) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "unable to read config") + } + + if err := json.Unmarshal(b, &img); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal config") + } + } + layerStore, ok := i.layerStores[c.ContainerOS] if !ok { - return "", system.ErrNotSupportedOperatingSystem + return ocispec.Descriptor{}, system.ErrNotSupportedOperatingSystem } rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) if err != nil { - return "", err + return ocispec.Descriptor{}, err } defer func() { if rwTar != nil { @@ -28,48 +83,102 @@ func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { } }() - var parent *image.Image - if c.ParentImageID == "" { - parent = new(image.Image) - parent.RootFS = image.NewRootFS() - } else { - parent, err = i.imageStore.Get(image.ID(c.ParentImageID)) - if err != nil { - return "", err - } + // TODO(containerd): Tee compressed output to content store + // for generation of the manifest. 
+ l, err := layerStore.Register(rwTar, layer.ChainID(identity.ChainID(img.RootFS.DiffIDs))) + if err != nil { + return ocispec.Descriptor{}, err } - l, err := layerStore.Register(rwTar, parent.RootFS.ChainID()) - if err != nil { - return "", err + created := time.Now().UTC() + diffID := l.DiffID() + + img.Created = &created + + isEmptyLayer := layer.IsEmpty(diffID) + if !isEmptyLayer { + img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(diffID)) } - defer layer.ReleaseAndLog(layerStore, l) + img.History = append(img.History, ocispec.History{ + Author: c.Author, + Created: &created, + CreatedBy: strings.Join(c.ContainerConfig.Cmd, " "), + Comment: c.Comment, + EmptyLayer: isEmptyLayer, + }) - // TODO(containerd): put in containerd's image store - cc := image.ChildConfig{ - ContainerID: c.ContainerID, - Author: c.Author, - Comment: c.Comment, - ContainerConfig: c.ContainerConfig, - Config: c.Config, - DiffID: l.DiffID(), + img.DockerVersion = dockerversion.Version + img.Author = c.Author + img.Comment = c.Comment + if img.OS == "" { + img.OS = c.ContainerOS } - config, err := json.Marshal(image.NewChildImage(parent, cc, c.ContainerOS)) + img.Container = c.ContainerID + img.Config = c.Config + img.ContainerConfig = *c.ContainerConfig + + config, err := json.Marshal(img) if err != nil { - return "", err + layer.ReleaseAndLog(layerStore, l) + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal committed image") + } + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.FromBytes(config), + Size: int64(len(config)), + } + + // TODO(containerd): Add labels (parent, etc) + ref := fmt.Sprintf("config-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(config), desc); err != nil { + layer.ReleaseAndLog(layerStore, l) + return ocispec.Descriptor{}, errors.Wrap(err, "unable to store config") } - id, err := 
i.imageStore.Create(config) + // Create a dangling image + _, err = i.client.ImageService().Create(ctx, images.Image{ + Name: desc.Digest.String(), + Target: desc, + CreatedAt: created, + UpdatedAt: created, + Labels: map[string]string{ + LabelImageDangling: desc.Digest.String(), + }, + }) if err != nil { - return "", err + layer.ReleaseAndLog(layerStore, l) + return ocispec.Descriptor{}, errors.Wrap(err, "unable to store image") } - if c.ParentImageID != "" { - if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil { - return "", err + cache.m.Lock() + if _, ok := cache.idCache[desc.Digest]; !ok { + ci := &cachedImage{ + config: desc, + parent: digest.Digest(c.ParentImageID), + layer: l, + } + cache.idCache[desc.Digest] = ci + + // TODO(containerd): Refer to manifest here + cache.tCache[desc.Digest] = ci + + if ci.parent != "" { + pci, ok := cache.idCache[ci.parent] + if ok { + pci.m.Lock() + pci.children = append(pci.children, desc.Digest) + pci.m.Unlock() + } } + } else { + // Image already exists, don't hold onto layer + defer layer.ReleaseAndLog(layerStore, l) } - return id, nil + + cache.m.Unlock() + + return desc, nil } func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.ReadCloser, err error) { @@ -115,7 +224,7 @@ func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.R // * it doesn't log a container commit event // // This is a temporary shim. Should be removed when builder stops using commit. 
-func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { +func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { container := i.containers.Get(c.ContainerID) if container == nil { // TODO: use typed error @@ -124,5 +233,9 @@ func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) c.ContainerMountLabel = container.MountLabel c.ContainerOS = container.OS c.ParentImageID = string(container.ImageID) - return i.CommitImage(c) + desc, err := i.CommitImage(ctx, c) + if err != nil { + return "", err + } + return image.ID(desc.Digest.String()), nil } diff --git a/daemon/images/images.go b/daemon/images/images.go index 06c4f96bb6cd7..a16a253f89210 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -264,22 +264,24 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al tags := map[string]struct{}{} for _, img := range imgs { - ref, err := reference.Parse(img.Name) - if err != nil { - continue - } - if named, ok := ref.(reference.Named); ok { - if c, ok := named.(reference.Canonical); ok { - digests[reference.FamiliarString(c)] = struct{}{} - } else if t, ok := named.(reference.Tagged); ok { - tags[reference.FamiliarString(t)] = struct{}{} + if _, ok := img.Labels[LabelImageDangling]; !ok { + ref, err := reference.Parse(img.Name) + if err != nil { + continue } - - switch img.Target.MediaType { - case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - // digest references only refer to manifests - default: - digests[reference.FamiliarName(named)+"@"+img.Target.Digest.String()] = struct{}{} + if named, ok := ref.(reference.Named); ok { + if c, ok := named.(reference.Canonical); ok { + digests[reference.FamiliarString(c)] = struct{}{} + } else if t, ok := named.(reference.Tagged); ok { + tags[reference.FamiliarString(t)] = struct{}{} + } + + switch img.Target.MediaType { + case images.MediaTypeDockerSchema2Config, 
ocispec.MediaTypeImageConfig: + // digest references only refer to manifests + default: + digests[reference.FamiliarName(named)+"@"+img.Target.Digest.String()] = struct{}{} + } } } } @@ -291,10 +293,22 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al newImage.RepoTags = append(newImage.RepoTags, t) } - if len(newImage.RepoDigests) == 0 { + if len(newImage.RepoDigests) == 0 && len(newImage.RepoTags) == 0 { + // TODO(containerd): also skip if has children + if !all { + continue + } + + if imageFilters.Contains("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + + if imageFilters.Contains("reference") { // skip images with no references if filtering by reference + continue + } + newImage.RepoDigests = []string{"none@none"} - } - if len(newImage.RepoTags) == 0 { newImage.RepoTags = []string{"none:none"} } From 85f26624f441b68b91a225de7da2387b89fca151 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 5 Feb 2019 17:05:54 -0800 Subject: [PATCH 19/73] Add parent label to committed images Fix layer references being taken on startup Signed-off-by: Derek McGowan --- daemon/images/cache.go | 15 +++++++++++++++ daemon/images/image.go | 7 ++++++- daemon/images/image_commit.go | 10 ++++++++-- 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 55af904d242e1..b72f60e878a27 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -2,6 +2,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "runtime" "sync" "github.com/containerd/containerd/images" @@ -14,6 +15,7 @@ import ( buildcache "github.com/docker/docker/image/cache" "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -168,11 +170,24 @@ func (i 
*ImageService) loadNSCache(ctx context.Context, namespace string) (*cach ci.parent = pid } } + diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), ci.config) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("unable to load image rootfs") + continue + } + // TODO(containerd): choose correct platform + ci.layer, err = i.layerStores[runtime.GOOS].Get(layer.ChainID(identity.ChainID(diffIDs))) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("no layer for image") + continue + } c.idCache[id.Digest] = ci c.ids.Add(id.Digest) } c.tCache[img.Target.Digest] = ci + + // Load image layer to prevent removal } if named != nil { ci.addReference(named) diff --git a/daemon/images/image.go b/daemon/images/image.go index e49a80b8e3e30..dff18677cc705 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -18,7 +18,12 @@ import ( ) const ( - LabelImageID = "docker.io/image.id" + // LabelImageID refers to the image ID used by Docker + // Deprecate this to support multi-arch images + LabelImageID = "docker.io/image.id" + + // LabelImageParent is Docker's parent image ID + // Stored on the image config blob LabelImageParent = "docker.io/image.parent" // LabelImageDangling refers to images with no name diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 34e2886022623..ec5fc542f1259 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -129,9 +129,15 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) Size: int64(len(config)), } - // TODO(containerd): Add labels (parent, etc) + opts := []content.Opt{} + + if c.ParentImageID != "" { + opts = append(opts, content.WithLabels(map[string]string{ + LabelImageParent: c.ParentImageID, + })) + } ref := fmt.Sprintf("config-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) - if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(config), desc); 
err != nil { + if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(config), desc, opts...); err != nil { layer.ReleaseAndLog(layerStore, l) return ocispec.Descriptor{}, errors.Wrap(err, "unable to store config") } From 8435405723a788d821a217e8f3f494329960f448 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 8 Feb 2019 15:31:02 -0800 Subject: [PATCH 20/73] Update events handler to use containerd Signed-off-by: Derek McGowan --- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- daemon/images/image_delete.go | 6 ++-- daemon/images/image_events.go | 48 ++++++++++++++++--------- daemon/images/image_exporter.go | 15 +++++--- daemon/images/image_import.go | 5 +-- daemon/images/image_tag.go | 2 +- daemon/images/images.go | 5 --- distribution/config.go | 2 +- distribution/pull.go | 2 +- distribution/push.go | 2 +- plugin/backend_linux.go | 44 +++++++++++++---------- 12 files changed, 78 insertions(+), 57 deletions(-) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index c5a6c848c5938..37f3c4c05c0a7 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -30,7 +30,7 @@ type imageBackend interface { type importExportBackend interface { LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error - ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ImportImage(ctx context.Context, src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error ExportImage(names []string, outStream io.Writer) error } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 40730884f0089..a3caa21268489 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -85,7 +85,7 @@ func (s 
*imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite if platform != nil { os = platform.OS } - err = s.backend.ImportImage(src, repo, os, tag, message, r.Body, output, r.Form["changes"]) + err = s.backend.ImportImage(ctx, src, repo, os, tag, message, r.Body, output, r.Form["changes"]) } if err != nil { if !output.Flushed() { diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index b371ba5fd6b81..1acaf420478ed 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -104,7 +104,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, } deletedRefs = append(deletedRefs, parsedRef) - i.LogImageEvent(imgID, imgID, "untag") + i.LogImageEvent(ctx, imgID, imgID, "untag") // If a tag reference was removed and the only remaining // references to the same repository are digest references, @@ -164,7 +164,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, return nil, conflict } - i.LogImageEvent(imgID, imgID, "untag") + i.LogImageEvent(ctx, imgID, imgID, "untag") } // TODO(containerd): Lock, perform deletion, @@ -345,7 +345,7 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, return records, err } - i.LogImageEvent(img.config.Digest.String(), img.config.Digest.String(), "delete") + i.LogImageEvent(ctx, img.config.Digest.String(), img.config.Digest.String(), "delete") records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) // TODO(containerd): Snapshot integration will obsolete this section, diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index b2c0dad788bc6..755003ddee18d 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -1,23 +1,23 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "encoding/json" + + "github.com/containerd/containerd/content" 
"github.com/docker/docker/api/types/events" ) // LogImageEvent generates an event related to an image with only the default attributes. -func (i *ImageService) LogImageEvent(imageID, refName, action string) { - i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) -} +func (i *ImageService) LogImageEvent(ctx context.Context, imageID, refName, action string) { + // image has not been removed yet. + // it could be missing if the event is `delete`. + attributes, _ := i.getImageLabels(ctx, imageID) -// LogImageEventWithAttributes generates an event related to an image with specific given attributes. -func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - // TODO(containerd): use i.getCachedRef(imageID) - img, err := i.getDockerImage(imageID) - if err == nil && img.V1Image.Config != nil { - // image has not been removed yet. - // it could be missing if the event is `delete`. - copyAttributes(attributes, img.V1Image.Config.Labels) + if attributes == nil { + attributes = map[string]string{} } + if refName != "" { attributes["name"] = refName } @@ -29,12 +29,26 @@ func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action stri i.eventsService.Log(action, events.ImageEventType, actor) } -// copyAttributes guarantees that labels are not mutated by event triggers. 
-func copyAttributes(attributes, labels map[string]string) { - if labels == nil { - return +func (i *ImageService) getImageLabels(ctx context.Context, imageID string) (map[string]string, error) { + img, err := i.GetImage(ctx, imageID) + if err != nil { + return nil, err } - for k, v := range labels { - attributes[k] = v + + p, err := content.ReadBlob(ctx, i.client.ContentStore(), img) + if err != nil { + return nil, err } + + var config struct { + Config struct { + Labels map[string]string + } + } + + if err := json.Unmarshal(p, &config); err != nil { + return nil, err + } + + return config.Config.Labels, nil } diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 58105dcb710cc..390fad09ebf2a 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -3,7 +3,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "io" - "github.com/docker/docker/image/tarexport" + "github.com/containerd/containerd/errdefs" ) // ExportImage exports a list of images to the given output stream. The @@ -12,14 +12,19 @@ import ( // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) - return imageExporter.Save(names, outStream) + // TODO(containerd): use containerd's archive exporter? + // This may require special logic to output the Docker format + //imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) + //return imageExporter.Save(names, outStream) + return errdefs.ErrNotImplemented } // LoadImage uploads a set of images into the repository. This is the // complement of ImageExport. The input stream is an uncompressed tar // ball containing images and metadata. 
func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) - return imageExporter.Load(inTar, outStream, quiet) + // TODO(containerd): use containerd's archive importer + //imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) + //return imageExporter.Load(inTar, outStream, quiet) + return errdefs.ErrNotImplemented } diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index af0cafde1f9aa..d05d1681e9425 100644 --- a/daemon/images/image_import.go +++ b/daemon/images/image_import.go @@ -1,6 +1,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "encoding/json" "io" "net/http" @@ -27,7 +28,7 @@ import ( // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. 
-func (i *ImageService) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { +func (i *ImageService) ImportImage(ctx context.Context, src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { var ( rc io.ReadCloser resp *http.Response @@ -133,7 +134,7 @@ func (i *ImageService) ImportImage(src string, repository, os string, tag string // } //} - i.LogImageEvent(id.String(), id.String(), "import") + i.LogImageEvent(ctx, id.String(), id.String(), "import") outStream.Write(streamformatter.FormatStatus("", id.String())) return nil } diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index ceb1ca6f95a65..1a71c310f32d8 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -68,6 +68,6 @@ func (i *ImageService) tagImage(ctx context.Context, img imageLink) error { } // TODO(containerd): Set last updated for target - i.LogImageEvent(img.target.Digest.String(), reference.FamiliarString(img.name), "tag") + i.LogImageEvent(ctx, img.target.Digest.String(), reference.FamiliarString(img.name), "tag") return i.updateCache(ctx, img) } diff --git a/daemon/images/images.go b/daemon/images/images.go index a16a253f89210..8ce1dd7daa467 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -38,11 +38,6 @@ func (r byCreated) Len() int { return len(r) } func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } -// Map returns a map of all images in the ImageStore -func (i *ImageService) Map() map[image.ID]*image.Image { - return i.imageStore.Map() -} - // Images returns a filtered list of images. filterArgs is a JSON-encoded set // of filter arguments which will be interpreted by api/types/filters. // filter is a shell glob string applied to repository names. 
The argument diff --git a/distribution/config.go b/distribution/config.go index e9631d1b8c0e7..6104583d28380 100644 --- a/distribution/config.go +++ b/distribution/config.go @@ -37,7 +37,7 @@ type Config struct { // and endpoint lookup. RegistryService registry.Service // ImageEventLogger notifies events for a given image - ImageEventLogger func(id, name, action string) + ImageEventLogger func(ctx context.Context, id, name, action string) // MetadataStore is the storage backend for distribution-specific // metadata. MetadataStore metadata.Store diff --git a/distribution/pull.go b/distribution/pull.go index be366ce4a99b6..149708d5cf661 100644 --- a/distribution/pull.go +++ b/distribution/pull.go @@ -145,7 +145,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo return TranslatePullError(err, ref) } - imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") + imagePullConfig.ImageEventLogger(ctx, reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") return nil } diff --git a/distribution/push.go b/distribution/push.go index 5617a4c95f498..444b66a5adac9 100644 --- a/distribution/push.go +++ b/distribution/push.go @@ -129,7 +129,7 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo return err } - imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") + imagePushConfig.ImageEventLogger(ctx, reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") return nil } diff --git a/plugin/backend_linux.go b/plugin/backend_linux.go index 044e14b0cbf81..9b2ed48e23207 100644 --- a/plugin/backend_linux.go +++ b/plugin/backend_linux.go @@ -226,7 +226,7 @@ func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHead MetaHeaders: metaHeader, AuthConfig: authConfig, RegistryService: pm.config.RegistryService, - ImageEventLogger: func(string, string, 
string) {}, + ImageEventLogger: func(context.Context, string, string, string) {}, ImageStore: cs, }, Schema2Types: distribution.PluginTypes, @@ -279,11 +279,13 @@ func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string pluginPullConfig := &distribution.ImagePullConfig{ Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - RegistryService: pm.config.RegistryService, - ImageEventLogger: pm.config.LogPluginEvent, - ImageStore: dm, + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: func(ctx context.Context, id, name, action string) { + pm.config.LogPluginEvent(id, name, action) + }, + ImageStore: dm, }, DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead Schema2Types: distribution.PluginTypes, @@ -331,11 +333,13 @@ func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, m pluginPullConfig := &distribution.ImagePullConfig{ Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - RegistryService: pm.config.RegistryService, - ImageEventLogger: pm.config.LogPluginEvent, - ImageStore: dm, + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: func(ctx context.Context, id, name, action string) { + pm.config.LogPluginEvent(id, name, action) + }, + ImageStore: dm, }, DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead Schema2Types: distribution.PluginTypes, @@ -461,14 +465,16 @@ func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header imagePushConfig := &distribution.ImagePushConfig{ Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - ProgressOutput: po, - RegistryService: pm.config.RegistryService, - ReferenceStore: rs, - ImageEventLogger: pm.config.LogPluginEvent, - ImageStore: is, 
- RequireSchema2: true, + MetaHeaders: metaHeader, + AuthConfig: authConfig, + ProgressOutput: po, + RegistryService: pm.config.RegistryService, + ReferenceStore: rs, + ImageEventLogger: func(ctx context.Context, id, name, action string) { + pm.config.LogPluginEvent(id, name, action) + }, + ImageStore: is, + RequireSchema2: true, }, ConfigMediaType: schema2.MediaTypePluginConfig, LayerStores: lss, From 177c88945175b7e5bdbb389964475eb7ccc39f9f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 11 Feb 2019 10:38:25 -0800 Subject: [PATCH 21/73] Add push support through containerd Uses containerd client to push. Temporarily resolves current platform on push to match current behavior. Signed-off-by: Derek McGowan --- daemon/images/image_push.go | 98 ++++++++++++++++++++++++++++++------- 1 file changed, 79 insertions(+), 19 deletions(-) diff --git a/daemon/images/image_push.go b/daemon/images/image_push.go index c397b1cd5218e..db9e091af4f07 100644 --- a/daemon/images/image_push.go +++ b/daemon/images/image_push.go @@ -2,15 +2,23 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "encoding/json" + "fmt" "io" + "sort" "time" - "github.com/docker/distribution/manifest/schema2" + "github.com/containerd/containerd" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" "github.com/docker/docker/pkg/progress" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // PushImage initiates a push operation on the repository named localName. 
@@ -20,12 +28,30 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea if err != nil { return err } + is := i.client.ImageService() + + var imgs []images.Image if tag != "" { // Push by digest is not supported, so only tags are supported. ref, err = reference.WithTag(ref, tag) if err != nil { return err } + + img, err := is.Get(ctx, ref.String()) + if err != nil { + return errors.Wrap(err, "unable to get image") + } + imgs = append(imgs, img) + } else { + // TODO(containerd): Escape '.' in ref + imgs, err := is.List(ctx, fmt.Sprintf("name~=^%s:.*$", ref.String())) + if err != nil { + return errors.Wrap(err, "unable to get image") + } + if len(imgs) == 0 { + return errors.Wrap(errdefs.ErrNotFound, "no matching images") + } } // Include a buffer so that slow client connections don't affect @@ -41,25 +67,59 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea close(writesDone) }() - imagePushConfig := &distribution.ImagePushConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: i.registryService, - ImageEventLogger: i.LogImageEvent, - MetadataStore: i.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), - ReferenceStore: i.referenceStore, - }, - ConfigMediaType: schema2.MediaTypeImageConfig, - LayerStores: distribution.NewLayerProvidersFromStores(i.layerStores), - UploadManager: i.uploadManager, + // TODO(containerd): Handle authConfig + // TODO(containerd): Handle metaHeaders + opts := []containerd.RemoteOpt{} + + for _, img := range imgs { + // TODO(containerd): Check for migrations to do + + switch img.Target.MediaType { + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + // TODO(containerd): Don't resolve manifest, rely on cross manifest push + // or push all platforms + p, err := content.ReadBlob(ctx, 
i.client.ContentStore(), img.Target) + if err != nil { + return errors.Wrap(err, "unable to read manifest list") + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return err + } + + platform := platforms.Default() + var descs []ocispec.Descriptor + for _, d := range idx.Manifests { + if d.Platform == nil || platform.Match(*d.Platform) { + descs = append(descs, d) + } + } + if len(descs) > 0 { + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + img.Target = descs[0] + } + + default: + // Keep target + } + if err = i.client.Push(ctx, img.Name, img.Target, opts...); err != nil { + err = errors.Wrap(err, "failed to push") + break + } } - err = distribution.Push(ctx, ref, imagePushConfig) - close(progressChan) - <-writesDone + //close(progressChan) + //<-writesDone imageActions.WithValues("push").UpdateSince(start) + return err } From 77e85abc1fb2c74dd7880fffee7bf7e213f4cf6f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 12 Feb 2019 12:04:27 -0800 Subject: [PATCH 22/73] Update images to use manifest as image ID Lookup images without using caching Signed-off-by: Derek McGowan --- daemon/images/cache.go | 16 +++-- daemon/images/image.go | 69 ++++++++++++++++++++++ daemon/images/images.go | 128 ++++++++++++++++++++++++---------------- 3 files changed, 158 insertions(+), 55 deletions(-) diff --git a/daemon/images/cache.go b/daemon/images/cache.go index b72f60e878a27..05c4e59e5acf0 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -40,8 +40,10 @@ type cache struct { // idCache maps Docker identifiers idCache map[digest.Digest]*cachedImage // tCache maps target digests to images - tCache map[digest.Digest]*cachedImage - ids *digestset.Set + tCache map[digest.Digest]*cachedImage + ids *digestset.Set + targets *digestset.Set + descriptors 
map[digest.Digest]ocispec.Descriptor } func (c *cache) byID(id digest.Digest) *cachedImage { @@ -83,9 +85,11 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach defer i.cacheL.Unlock() c := &cache{ - idCache: map[digest.Digest]*cachedImage{}, - tCache: map[digest.Digest]*cachedImage{}, - ids: digestset.NewSet(), + idCache: map[digest.Digest]*cachedImage{}, + tCache: map[digest.Digest]*cachedImage{}, + ids: digestset.NewSet(), + targets: digestset.NewSet(), + descriptors: map[digest.Digest]ocispec.Descriptor{}, } is := i.client.ImageService() @@ -186,6 +190,8 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach c.ids.Add(id.Digest) } c.tCache[img.Target.Digest] = ci + c.targets.Add(img.Target.Digest) + c.descriptors[img.Target.Digest] = img.Target // Load image layer to prevent removal } diff --git a/daemon/images/image.go b/daemon/images/image.go index dff18677cc705..c5021d6a68a52 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -29,6 +29,11 @@ const ( // LabelImageDangling refers to images with no name // Stored on images and points to the image config digest LabelImageDangling = "docker.io/image.dangling" + + // LabelLayerPrefix is used as the label prefix for layer stores + // Stores the layer reference in the given layerstore. + // The value always represents the digest of the ChainID + LabelLayerPrefix = "docker.io/layer." ) // ErrImageDoesNotExist is error returned when no image can be found for a reference. @@ -56,6 +61,70 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string) (ocispec.De return ci.config, nil } +// SearchImage searches for an image based on the given +// reference or identifier. Returns the descriptor of +// the image, could be manifest list, manifest, or config. 
+func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { + parsed, err := reference.ParseAnyReference(refOrID) + if err != nil { + return ocispec.Descriptor{}, err + } + + is := i.client.ImageService() + + c, err := i.getCache(ctx) + if err != nil { + return ocispec.Descriptor{}, err + } + + c.m.RLock() + defer c.m.RUnlock() + + namedRef, ok := parsed.(reference.Named) + if !ok { + digested, ok := parsed.(reference.Digested) + if !ok { + return ocispec.Descriptor{}, errdefs.InvalidParameter(errors.New("bad reference")) + } + + // Check if descriptor is cached + desc, ok := c.descriptors[digested.Digest()] + if ok { + return desc, nil + } + + imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to lookup digest") + } + if len(imgs) == 0 { + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("image not find with digest")) + } + + return imgs[0].Target, nil + } + + img, err := is.Get(ctx, namedRef.String()) + if err != nil { + if !cerrdefs.IsNotFound(err) { + return ocispec.Descriptor{}, err + } + dgst, err := c.targets.Lookup(refOrID) + if err != nil { + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("reference not found")) + } + + desc, ok := c.descriptors[dgst] + if ok { + return desc, nil + } + + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("id not found")) + } + + return img.Target, nil +} + // GetImage returns an image corresponding to the image referred to by refOrID. // Deprecated: Use (i *ImageService).GetImage instead. 
func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { diff --git a/daemon/images/images.go b/daemon/images/images.go index 8ce1dd7daa467..741d83fe75b65 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -61,21 +62,38 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al return nil, invalidFilter{"dangling", imageFilters.Get("dangling")} } } + cs := i.client.ContentStore() - var beforeFilter, sinceFilter *image.Image + var beforeFilter, sinceFilter *time.Time err = imageFilters.WalkValues("before", func(value string) error { - var err error - beforeFilter, err = i.getDockerImage(value) - return err + img, err := i.ResolveImage(ctx, value) + if err != nil { + return err + } + info, err := cs.Info(ctx, img.Digest) + if err != nil { + return err + } + + beforeFilter = &info.CreatedAt + return nil }) if err != nil { return nil, err } err = imageFilters.WalkValues("since", func(value string) error { - var err error - sinceFilter, err = i.getDockerImage(value) - return err + img, err := i.ResolveImage(ctx, value) + if err != nil { + return err + } + info, err := cs.Info(ctx, img.Digest) + if err != nil { + return err + } + + sinceFilter = &info.CreatedAt + return nil }) if err != nil { return nil, err @@ -86,7 +104,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al filters = append(filters, "name~=/sha256:[a-z0-9]+/") } else if imageFilters.Contains("reference") { for _, v := range imageFilters.Get("reference") { - // TODO: Parse reference, if only partial match then + // TODO(containerd): Parse reference, if only partial match then // use as regex filters = append(filters, "name=="+v) } 
@@ -120,38 +138,26 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al } m := map[digest.Digest][]images.Image{} + created := map[digest.Digest]time.Time{} c.m.RLock() for _, img := range allImages { - if beforeFilter != nil && beforeFilter.Image.Created != nil { - created := img.Labels["docker.io/created"] - if created != "" { - t, err := time.Parse(created, time.RFC3339) - if err == nil && t.Equal(*beforeFilter.Image.Created) || t.After(*beforeFilter.Image.Created) { - continue - } - } + info, err := cs.Info(ctx, img.Target.Digest) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Warnf("failed to stat target") + continue } - if sinceFilter != nil && sinceFilter.Image.Created != nil { - created := img.Labels["docker.io/created"] - if created != "" { - t, err := time.Parse(created, time.RFC3339) - if err == nil && t.Equal(*sinceFilter.Image.Created) || t.Before(*sinceFilter.Image.Created) { - continue - } - } - + if beforeFilter != nil && !info.CreatedAt.Before(*beforeFilter) { + continue } - ci, ok := c.tCache[img.Target.Digest] - if !ok { - // TODO(containerd): Lookup config and update cache - log.G(ctx).WithField("name", img.Name).Debugf("skipping non-cached image") + if sinceFilter != nil && !info.CreatedAt.After(*sinceFilter) { continue } - m[ci.config.Digest] = append(m[ci.config.Digest], img) + m[img.Target.Digest] = append(m[img.Target.Digest], img) + created[img.Target.Digest] = info.CreatedAt //var size int64 // TODO: this seems pretty dumb to do @@ -229,29 +235,51 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al //var allLayers map[layer.ChainID]layer.Layer //var allContainers []*container.Container - // TODO: For each found image ID, add references - for config, imgs := range m { + for dgst, imgs := range m { newImage := new(types.ImageSummary) - newImage.ID = config.String() - - image, err := i.getImage(ctx, ocispec.Descriptor{Digest: config}) - if err != nil { 
- // TODO(containerd): log this - continue - } - if image.Image.Created != nil { - newImage.Created = image.Image.Created.Unix() + newImage.ID = dgst.String() + newImage.Created = created[dgst].Unix() + + var target = imgs[0].Target + var config ocispec.Descriptor + + switch target.MediaType { + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + default: + // TODO(containerd): Set this more globally and to + // an appropriate value for Windows + platform := platforms.Default() + + desc, err := images.Config(ctx, cs, imgs[0].Target, platform) + if err != nil { + log.G(ctx).WithError(err).WithField("image", dgst.String()).Warnf("unable to resolve config") + continue + } + config = desc } - // TODO: Fill this in from config and content labels - //newImage.ParentID = image.Parent.String() - //newImage.Size = size - //newImage.VirtualSize = size - //newImage.SharedSize = -1 - //newImage.Containers = -1 - //if image.Config != nil { - // newImage.Labels = image.Config.Labels - //} + // TODO(containerd): Stat config + if info, err := cs.Info(ctx, config.Digest); err == nil { + for label, value := range info.Labels { + if label == LabelImageParent { + newImage.ParentID = value + } else if strings.HasPrefix(label, LabelLayerPrefix) { + // TODO: Lookup from layer store + } + // TODO(containerd): Store size in label + } + // TODO(containerd): Resolve config for current platform + // TODO(containerd): Fill this in from config and content labels + //newImage.Size = size + //newImage.VirtualSize = size + //newImage.SharedSize = -1 + //newImage.Containers = -1 + //if image.Config != nil { + // newImage.Labels = image.Config.Labels + //} + } else { + log.G(ctx).WithError(err).WithField("digest", config.Digest.String()).Warnf("unable to get image config info") + } // TODO: Add each image reference // For these, unique them by manifest, none:none or none@digest From bbe1d776ee4522eb40c96ef6fefe7543169fd784 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 
15 Feb 2019 17:17:48 -0800 Subject: [PATCH 23/73] Add layer label to content Adds equivalent label as the snapshotter gc label Signed-off-by: Derek McGowan --- daemon/images/image_commit.go | 11 +++++++---- daemon/images/image_pull.go | 18 +++++++++++++++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index ec5fc542f1259..805968d75a169 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -129,13 +129,16 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) Size: int64(len(config)), } - opts := []content.Opt{} + labels := map[string]string{ + fmt.Sprintf("%s%s", LabelLayerPrefix, layerStore.DriverName()): l.ChainID().String(), + } if c.ParentImageID != "" { - opts = append(opts, content.WithLabels(map[string]string{ - LabelImageParent: c.ParentImageID, - })) + labels[LabelImageParent] = c.ParentImageID } + + opts := []content.Opt{content.WithLabels(labels)} + ref := fmt.Sprintf("config-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(config), desc, opts...); err != nil { layer.ReleaseAndLog(layerStore, l) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 6cc0ccbf3a4d8..050cc8ed1ba5f 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -2,6 +2,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "fmt" "io" "runtime" "strings" @@ -167,6 +168,7 @@ func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (l var ( chain = []digest.Digest{} l layer.Layer + ls = i.layerStores[runtime.GOOS] ) for d := range diffIDs { chain = append(chain, diffIDs[d]) @@ -178,7 +180,7 @@ func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (l logrus.Debugf("Layer applied: %s (%s)", nl.DiffID(), diffIDs[d]) if l != nil { - 
metadata, err := i.layerStores[runtime.GOOS].Release(l) + metadata, err := ls.Release(l) if err != nil { return nil, errors.Wrap(err, "failed to release layer") } @@ -189,6 +191,20 @@ func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (l l = nl } + + key := fmt.Sprintf("%s%s", LabelLayerPrefix, ls.DriverName()) + info := content.Info{ + Digest: manifest.Config.Digest, + Labels: map[string]string{ + key: l.ChainID().String(), + }, + } + + if _, err := cs.Update(ctx, info, "labels."+key); err != nil { + layer.ReleaseAndLog(ls, l) + return nil, errors.Wrap(err, "failed to update image config label") + } + return l, nil } From f5282b9cfd62bf14bfac1a05e4cf592df153ce6e Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Thu, 14 Feb 2019 16:33:27 -0800 Subject: [PATCH 24/73] unpack layers simultaneously Signed-off-by: Anda Xu --- daemon/images/image_pull.go | 159 ++++++++++++++++++++++++++---------- 1 file changed, 114 insertions(+), 45 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 050cc8ed1ba5f..b57eeb8eaf5d5 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -4,32 +4,34 @@ import ( "context" "fmt" "io" + "io/ioutil" "runtime" "strings" "sync" "time" - "github.com/docker/docker/pkg/stringid" - "github.com/containerd/containerd" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs 
"github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +// default maximum concurrent downloads allowed during docker pull +const defaultMaxConcurrentDownloads = 3 + // PullImage initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. func (i *ImageService) PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { @@ -69,26 +71,92 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return err } + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) ongoing := newJobs(ref.Name()) - pctx, stopProgress := context.WithCancel(ctx) progress := make(chan struct{}) go func() { // no progress bar, because it hides some debug logs - showProgress(pctx, ongoing, ref, i.client.ContentStore(), outStream) + showProgress(pctx, ongoing, ref, i.client.ContentStore(), progressOutput) close(progress) }() - // TODO: Lease - h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + + h := images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { if desc.MediaType != images.MediaTypeDockerSchema1Manifest { ongoing.add(desc) } return nil, nil }) + + var ( + l layer.Layer + layers []specs.Descriptor + dlStatus = map[digest.Digest]bool{} + dlChan = make(chan digest.Digest, 5) + unpackChan = make(chan struct{}) + ) + // unpackHandler handles layer unpacking concurrently as soon as + // a layer in order has been downloaded + unpackHandler := func(h images.Handler) images.Handler { + return images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { + logrus.Infof("parent desc -> type=%s id=%s", desc.MediaType, stringid.TruncateID(desc.Digest.String())) + children, err := 
h.Handle(ctx, desc) + if err != nil { + return children, err + } + // manifest downloaded + if len(children) > 1 { + // trim off config descriptor only keep the layer descriptors + layers = children[1:] + // start the message broker to track layer downloading status + go func() { + // check the layers downloading status according to + // the order from manifest so that the unpacking + // process will be signaled in the same order as well. + // Also buffer the layers which downloaded faster + // ahead of the order. + for i := 0; i < len(layers); { + if ok := dlStatus[layers[i].Digest]; ok { + unpackChan <- struct{}{} + i++ + continue + } + select { + case d := <-dlChan: + if d == layers[i].Digest { + unpackChan <- struct{}{} + i++ + } + dlStatus[d] = true + } + } + }() + } + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Config: + // handle unpack + l, err = i.unpack(pctx, desc, layers, progressOutput, unpackChan) + if err != nil { + return nil, errors.Wrapf(err, "failed to unpack %s", desc.Digest) + } + case images.MediaTypeDockerSchema2LayerGzip: + // a layer has been downloaded, signal downloaded status + dlChan <- desc.Digest + } + + return children, nil + }) + } + opts := []containerd.RemoteOpt{ containerd.WithImageHandler(h), + containerd.WithImageHandlerWrapper(unpackHandler), + containerd.WithMaxConcurrentDownloads(defaultMaxConcurrentDownloads), } + + // TODO: Lease // TODO: Custom resolver // - Auth config // - Custom headers @@ -97,15 +165,12 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO: unpack tracking, use download manager for now? img, err := i.client.Pull(pctx, ref.String(), opts...) 
- - config, err := img.Config(pctx) if err != nil { - return errors.Wrap(err, "failed to resolve configuration") + return errors.Wrap(err, "failed to pull image") } - - l, err := i.unpack(pctx, img.Target()) + config, err := img.Config(pctx) if err != nil { - return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) + return errors.Wrap(err, "failed to resolve configuration") } // TODO: Unpack into layer store @@ -147,37 +212,34 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO: Add shallow pull function which returns descriptor -func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (layer.Layer, error) { +func (i *ImageService) unpack(ctx context.Context, config specs.Descriptor, layers []specs.Descriptor, progressOutput progress.Output, unpackChan chan struct{}) (layer.Layer, error) { var ( cs = i.client.ContentStore() + ls = i.layerStores[runtime.GOOS] ) - manifest, err := images.Manifest(ctx, cs, target, platforms.Default()) - if err != nil { - return nil, err - } - - diffIDs, err := images.RootFS(ctx, cs, manifest.Config) + diffIDs, err := images.RootFS(ctx, cs, config) if err != nil { return nil, errors.Wrap(err, "failed to resolve rootfs") } - if len(diffIDs) != len(manifest.Layers) { + if len(diffIDs) != len(layers) { return nil, errors.Errorf("mismatched image rootfs and manifest layers") } var ( chain = []digest.Digest{} l layer.Layer - ls = i.layerStores[runtime.GOOS] ) for d := range diffIDs { chain = append(chain, diffIDs[d]) - - nl, err := i.applyLayer(ctx, manifest.Layers[d], chain) + // start unpacking upon signaled after current layer downloading complete + <-unpackChan + nl, err := i.applyLayer(ctx, layers[d], chain, progressOutput) if err != nil { + logrus.Errorf("apply layer failed -> %s", err) return nil, errors.Wrapf(err, "failed to apply layer %d", d) } - logrus.Debugf("Layer applied: %s (%s)", nl.DiffID(), diffIDs[d]) + logrus.Debugf("Layer applied: chain=%s %s 
(%s)", nl.ChainID(), nl.DiffID(), diffIDs[d]) if l != nil { metadata, err := ls.Release(l) @@ -194,7 +256,7 @@ func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (l key := fmt.Sprintf("%s%s", LabelLayerPrefix, ls.DriverName()) info := content.Info{ - Digest: manifest.Config.Digest, + Digest: config.Digest, Labels: map[string]string{ key: l.ChainID().String(), }, @@ -208,7 +270,7 @@ func (i *ImageService) unpack(ctx context.Context, target ocispec.Descriptor) (l return l, nil } -func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, layers []digest.Digest) (layer.Layer, error) { +func (i *ImageService) applyLayer(ctx context.Context, blob specs.Descriptor, layers []digest.Digest, progressOutput progress.Output) (layer.Layer, error) { var ( cs = i.client.ContentStore() ls = i.layerStores[runtime.GOOS] @@ -227,7 +289,12 @@ func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, } defer ra.Close() - dc, err := compression.DecompressStream(content.NewReader(ra)) + rc := ioutil.NopCloser(content.NewReader(ra)) + blobId := stringid.TruncateID(blob.Digest.String()) + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, rc), progressOutput, blob.Size, blobId, "Extracting") + defer reader.Close() + + dc, err := compression.DecompressStream(reader) if err != nil { return nil, err } @@ -240,6 +307,7 @@ func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, return ls.Register(dc, layer.ChainID(parent)) } + func getTagOrDigest(ref reference.Named) string { var ( // manifest distribution.Manifest @@ -255,14 +323,15 @@ func getTagOrDigest(ref reference.Named) string { } const ( - downloading = "Downloading" - dlcomplete = "Download complete" - waiting = "Waiting" - exists = "Already exists" + downloading = "Downloading" + dlcomplete = "Download complete" + waiting = "Waiting" + exists = "Already exists" + pullcomplete = "Pull complete" ) -func showProgress(ctx 
context.Context, ongoing *jobs, ref reference.Named, cs content.Store, out io.Writer) { - progressOutput := streamformatter.NewJSONProgressOutput(out, false) +func showProgress(ctx context.Context, ongoing *jobs, ref reference.Named, cs content.Store, progressOutput progress.Output) { + // progressOutput := streamformatter.NewJSONProgressOutput(out, false) progressOutput.WriteProgress(progress.Progress{ID: getTagOrDigest(ref), Message: "Pulling from " + reference.Path(ref)}) var ( ticker = time.NewTicker(100 * time.Millisecond) @@ -332,15 +401,15 @@ outer: } } } else if done { - progressOutput.WriteProgress(progress.Progress{ID: descID, Action: dlcomplete}) + progressOutput.WriteProgress(progress.Progress{ID: descID, Action: pullcomplete}) if ok { if status.Status != dlcomplete && status.Status != exists { - status.Status = dlcomplete + status.Status = pullcomplete statuses[descID] = status } } else { statuses[descID] = StatusInfo{ - Status: dlcomplete, + Status: pullcomplete, } } } @@ -360,12 +429,12 @@ type StatusInfo struct { Status string } -func isLayer(desc ocispec.Descriptor) bool { +func isLayer(desc specs.Descriptor) bool { switch desc.MediaType { case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, - ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: + specs.MediaTypeImageLayer, specs.MediaTypeImageLayerGzip, + specs.MediaTypeImageLayerNonDistributable, specs.MediaTypeImageLayerNonDistributableGzip: return true default: return false @@ -380,7 +449,7 @@ func isLayer(desc ocispec.Descriptor) bool { type jobs struct { name string added map[digest.Digest]struct{} - descs []ocispec.Descriptor + descs []specs.Descriptor mu sync.Mutex } @@ -391,7 +460,7 @@ func newJobs(name string) *jobs { } } -func (j *jobs) add(desc ocispec.Descriptor) { 
+func (j *jobs) add(desc specs.Descriptor) { j.mu.Lock() defer j.mu.Unlock() @@ -402,10 +471,10 @@ func (j *jobs) add(desc ocispec.Descriptor) { j.added[desc.Digest] = struct{}{} } -func (j *jobs) jobs() []ocispec.Descriptor { +func (j *jobs) jobs() []specs.Descriptor { j.mu.Lock() defer j.mu.Unlock() - var descs []ocispec.Descriptor + var descs []specs.Descriptor return append(descs, j.descs...) } From 1091b0ab79dde1dfee87f517027504738d64c41a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 25 Feb 2019 13:36:41 -0800 Subject: [PATCH 25/73] Resolve runtime image Signed-off-by: Derek McGowan --- api/types/configs.go | 3 +- daemon/container.go | 25 +++--- daemon/create.go | 97 ++++++++--------------- daemon/images/image.go | 161 +++++++++++++++++++++++++++++++++++++++ daemon/images/service.go | 5 ++ daemon/start.go | 7 +- daemon/update.go | 7 +- 7 files changed, 228 insertions(+), 77 deletions(-) diff --git a/api/types/configs.go b/api/types/configs.go index 178e911a7afc2..c75abd743084d 100644 --- a/api/types/configs.go +++ b/api/types/configs.go @@ -11,7 +11,8 @@ import ( // ContainerCreateConfig is the parameter set to ContainerCreate() type ContainerCreateConfig struct { - Name string + Name string + // TODO(containerd): Add Platform (OS, Architecture, Variant) Config *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig diff --git a/daemon/container.go b/daemon/container.go index 526e029731d58..87cec59d03abf 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -12,6 +12,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/container" + "github.com/docker/docker/daemon/images" "github.com/docker/docker/daemon/network" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" @@ -128,7 +129,7 @@ func (daemon *Daemon) Register(c *container.Container) error { return c.CheckpointTo(daemon.containersReplica) } 
-func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, img ocispec.Descriptor, managed bool) (*container.Container, error) { +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, img images.RuntimeImage, managed bool) (*container.Container, error) { var ( id string err error @@ -158,11 +159,15 @@ func (daemon *Daemon) newContainer(name string, operatingSystem string, config * base.Args = args //FIXME: de-duplicate from config base.Config = config base.HostConfig = &containertypes.HostConfig{} - base.ImageID = image.ID(img.Digest) + // TODO(Containerd): Set image id and runtime image id + base.ImageID = image.ID(img.Config.Digest) base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name - base.Driver = daemon.imageService.GraphDriverForOS(operatingSystem) - base.OS = operatingSystem + // TODO(containerd): Rename this function or pass it in, get it after layer created? + base.Driver = daemon.imageService.GraphDriverForOS(img.Platform.OS) + base.OS = img.Platform.OS + // TODO(containerd): Set architecture + // TODO(containerd): Set variant return base, err } @@ -234,7 +239,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig * // verifyContainerSettings performs validation of the hostconfig and config // structures. -func (daemon *Daemon) verifyContainerSettings(platform string, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) { +func (daemon *Daemon) verifyContainerSettings(platform ocispec.Platform, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) { // First perform verification of settings common across all platforms. 
if err = validateContainerConfig(config, platform); err != nil { return warnings, err @@ -251,7 +256,7 @@ func (daemon *Daemon) verifyContainerSettings(platform string, hostConfig *conta return warnings, err } -func validateContainerConfig(config *containertypes.Config, platform string) error { +func validateContainerConfig(config *containertypes.Config, platform ocispec.Platform) error { if config == nil { return nil } @@ -272,7 +277,7 @@ func validateContainerConfig(config *containertypes.Config, platform string) err return validateHealthCheck(config.Healthcheck) } -func validateHostConfig(hostConfig *containertypes.HostConfig, platform string) error { +func validateHostConfig(hostConfig *containertypes.HostConfig, platform ocispec.Platform) error { if hostConfig == nil { return nil } @@ -280,7 +285,7 @@ func validateHostConfig(hostConfig *containertypes.HostConfig, platform string) return errors.Errorf("can't create 'AutoRemove' container with restart policy") } // Validate mounts; check if host directories still exist - parser := volumemounts.NewParser(platform) + parser := volumemounts.NewParser(platform.OS) for _, cfg := range hostConfig.Mounts { if err := parser.ValidateMountConfig(&cfg); err != nil { return err @@ -383,13 +388,13 @@ func validateRestartPolicy(policy containertypes.RestartPolicy) error { // translateWorkingDir translates the working-dir for the target platform, // and returns an error if the given path is not an absolute path. -func translateWorkingDir(config *containertypes.Config, platform string) error { +func translateWorkingDir(config *containertypes.Config, platform ocispec.Platform) error { if config.WorkingDir == "" { return nil } wd := config.WorkingDir switch { - case runtime.GOOS != platform: + case runtime.GOOS != platform.OS: // LCOW. 
Force Unix semantics wd = strings.Replace(wd, string(os.PathSeparator), "/", -1) if !path.IsAbs(wd) { diff --git a/daemon/create.go b/daemon/create.go index fb321b070c191..6a700904114eb 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" @@ -17,8 +17,8 @@ import ( "github.com/docker/docker/daemon/images" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -26,6 +26,7 @@ import ( type createOpts struct { params types.ContainerCreateConfig + rImage images.RuntimeImage managed bool ignoreImagesArgsEscaped bool } @@ -61,23 +62,26 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) } - os := runtime.GOOS - // TODO(containerd): Resolve os for LCOW - // TODO(containerd): Why is this lookup done twice just for LCOW?? - //if opts.params.Config.Image != "" { - // _, img, err := daemon.imageService.GetImage(context.TODO(), params.Config.Image) - // if err == nil { - // os = img.OS - // } - //} else { - // // This mean scratch. On Windows, we can safely assume that this is a linux - // // container. 
On other platforms, it's the host OS (which it already is) - // if runtime.GOOS == "windows" && system.LCOWSupported() { - // os = "linux" - // } - //} - - warnings, err := daemon.verifyContainerSettings(os, opts.params.HostConfig, opts.params.Config, false) + if opts.params.Config.Image != "" { + var err error + opts.rImage, err = daemon.imageService.ResolveRuntimeImage(ctx, opts.params.Config.Image) + if err != nil { + return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(err) + } + } else { + // TODO(containerd): move this logic to image service + opts.rImage.Platform = platforms.DefaultSpec() + + // This mean scratch. On Windows, we can safely assume that this is a linux + // container. On other platforms, it's the host OS (which it already is) + if opts.rImage.Platform.OS == "windows" && system.LCOWSupported() { + opts.rImage.Platform.OS = "linux" + opts.rImage.Platform.OSVersion = "" + opts.rImage.Platform.OSFeatures = []string{} + } + } + + warnings, err := daemon.verifyContainerSettings(opts.rImage.Platform, opts.params.HostConfig, opts.params.Config, false) if err != nil { return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) } @@ -112,18 +116,10 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *container.Container, retErr error) { var ( container *container.Container - desc ocispec.Descriptor err error ) - if opts.params.Config.Image != "" { - desc, err = daemon.imageService.GetImage(ctx, opts.params.Config.Image) - if err != nil { - return nil, err - } - } - - if err := daemon.mergeAndVerifyConfig(ctx, opts.params.Config, desc); err != nil { + if err := daemon.mergeAndVerifyConfig(ctx, opts.params.Config, opts.rImage.ConfigBytes); err != nil { return nil, errdefs.InvalidParameter(err) } @@ -131,33 +127,8 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC 
*contai return nil, errdefs.InvalidParameter(err) } - os := runtime.GOOS - if os == "windows" { - if desc.Digest != "" { - // TODO(containerd): resolve os for LCOW on Windows - // TODO(containerd): ensure platform in descriptor? - // TODO(containerd): Read blob - // TODO(containerd): Unmarshal OS - - //if img.OS != "" { - // os = img.OS - //} else { - // // default to the host OS except on Windows with LCOW - // if runtime.GOOS == "windows" && system.LCOWSupported() { - // os = "linux" - // } - //} - //imgID = desc.Digest - - //if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { - // return nil, errors.New("operating system on which parent image was created is not Windows") - //} - } else { - os = "linux" // 'scratch' case. - } - } - - if container, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, desc, opts.managed); err != nil { + // TODO(containerd): Move this before OS and image check, remove desc and os from here + if container, err = daemon.newContainer(opts.params.Name, opts.params.Config, opts.params.HostConfig, opts.rImage, opts.managed); err != nil { return nil, err } defer func() { @@ -193,11 +164,13 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai // Set RWLayer for container after mount labels have been set createOpts := []images.CreateLayerOpt{ - images.WithLayerImage(desc), + images.WithLayerImage(opts.rImage.Config), images.WithLayerContainer(container), images.WithLayerInit(setupInitLayer(daemon.idMapping)), + // TODO(containerd): pass in platform } + // TODO(containerd): return non layer type rwLayer, err := daemon.imageService.CreateLayer(ctx, createOpts...) 
if err != nil { return nil, errdefs.System(err) @@ -217,6 +190,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai return nil, err } + // TODO(containerd): Add containerd GC and Docker layer labels if err := daemon.createContainerOSSpecificSettings(container, opts.params.Config, opts.params.HostConfig); err != nil { return nil, err } @@ -303,18 +277,13 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) return nil, nil } -func (daemon *Daemon) mergeAndVerifyConfig(ctx context.Context, config *containertypes.Config, img ocispec.Descriptor) error { - if img.Digest != "" { - p, err := content.ReadBlob(ctx, daemon.containerdCli.ContentStore(), img) - if err != nil { - return errors.Wrap(err, "failed to read config") - } - +func (daemon *Daemon) mergeAndVerifyConfig(ctx context.Context, config *containertypes.Config, configBytes []byte) error { + if len(configBytes) != 0 { // Only parse out the config key var imgConfig struct { Config *containertypes.Config `json:"config,omitempty"` } - if err := json.Unmarshal(p, &imgConfig); err != nil { + if err := json.Unmarshal(configBytes, &imgConfig); err != nil { return errors.Wrap(err, "failed to parse image config") } diff --git a/daemon/images/image.go b/daemon/images/image.go index c5021d6a68a52..9df3a3b8f63ff 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -4,10 +4,12 @@ import ( "context" "encoding/json" "fmt" + "sort" "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/errdefs" @@ -125,6 +127,165 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe return img.Target, nil } +// RuntimeImage represents a platform-specific image along with the +// image configuration and 
targeted image ID. +type RuntimeImage struct { + Target ocispec.Descriptor + Config ocispec.Descriptor + ConfigBytes []byte + Platform ocispec.Platform +} + +// ResolveRuntimeImage resolves an image down to the platform-specific +// runtime configuration for the image. +// A runtime image is platform specific. +// The platform is resolved based on availability in the image and +// the order preference of the backend storage drivers. +func (i *ImageService) ResolveRuntimeImage(ctx context.Context, refOrID string) (RuntimeImage, error) { + desc, err := i.ResolveImage(ctx, refOrID) + if err != nil { + return RuntimeImage{}, err + } + + runtimeImages, err := i.runtimeImages(ctx, desc) + if err != nil { + return RuntimeImage{}, err + } + + // filter platforms, do inplace filtering since small sized array + for j := 0; j < len(runtimeImages); { + if !i.platforms.Match(runtimeImages[j].Platform) { + copy(runtimeImages[j:], runtimeImages[j+1:]) + runtimeImages = runtimeImages[:len(runtimeImages)-1] + } else { + j++ + } + } + + sort.SliceStable(runtimeImages, func(j, k int) bool { + return i.platforms.Less(runtimeImages[j].Platform, runtimeImages[k].Platform) + }) + + if len(runtimeImages) == 0 { + return RuntimeImage{}, errdefs.NotFound(errors.New("no runtime image found")) + } + + ri := runtimeImages[0] + if len(ri.ConfigBytes) == 0 { + ri.ConfigBytes, err = content.ReadBlob(ctx, i.client.ContentStore(), ri.Config) + if err != nil { + return RuntimeImage{}, err + } + } + + return ri, nil +} + +func (i *ImageService) runtimeImages(ctx context.Context, image ocispec.Descriptor) ([]RuntimeImage, error) { + var ( + imageMap = map[digest.Digest]RuntimeImage{} + runtimeImages []RuntimeImage + cs = i.client.ContentStore() + ) + + if err := images.Walk(ctx, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + image, ok := 
imageMap[desc.Digest] + if !ok { + image = RuntimeImage{ + Target: desc, + } + } + image.Config = desc + + p, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + if cerrdefs.IsNotFound(err) { + log.G(ctx).Debugf("image config missing: %s", desc.Digest.String()) + return nil, nil + } + return nil, err + } + + if err := json.Unmarshal(p, &image.Platform); err != nil { + return nil, err + } + + if image.Platform.OS == "" { + log.G(ctx).Warnf("image is missing platform: %s", desc.Digest.String()) + return nil, nil + } + + image.Platform = platforms.Normalize(image.Platform) + image.ConfigBytes = p + + runtimeImages = append(runtimeImages, image) + return nil, nil + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + if cerrdefs.IsNotFound(err) { + log.G(ctx).Debugf("image manifest missing: %s", desc.Digest.String()) + return nil, nil + } + return nil, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + if image, ok := imageMap[desc.Digest]; ok { + if image.Platform.OS != "" { + // Use platform from manifest list + image.Config = manifest.Config + runtimeImages = append(runtimeImages, image) + return nil, nil + } else { + // Map config to the runtime image + imageMap[manifest.Config.Digest] = image + } + } else { + imageMap[manifest.Config.Digest] = RuntimeImage{ + Target: desc, + } + } + + return []ocispec.Descriptor{manifest.Config}, nil + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + return nil, err + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + + for _, m := range idx.Manifests { + ri := RuntimeImage{ + Target: desc, + } + if m.Platform != nil { + ri.Platform = platforms.Normalize(*m.Platform) + } + imageMap[m.Digest] = ri + } + + return idx.Manifests, 
nil + + } + return nil, errdefs.NotFound(errors.Errorf("unexpected media type %v for %v", desc.MediaType, desc.Digest)) + }), image); err != nil { + return nil, err + } + + return runtimeImages, nil +} + // GetImage returns an image corresponding to the image referred to by refOrID. // Deprecated: Use (i *ImageService).GetImage instead. func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { diff --git a/daemon/images/service.go b/daemon/images/service.go index 542ddb2d85974..0f851a1f2518f 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" "github.com/docker/docker/distribution" @@ -66,6 +67,9 @@ func NewImageService(config ImageServiceConfig) *ImageService { referenceStore: config.ReferenceStore, registryService: config.RegistryService, uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), + + // TODO(containerd): derive from configured layerstores + platforms: platforms.Ordered(platforms.DefaultSpec()), } } @@ -76,6 +80,7 @@ type ImageService struct { containers containerStore eventsService *daemonevents.Events layerStores map[string]layer.Store // By operating system + platforms platforms.MatchComparer pruneRunning int32 // namespaced cache diff --git a/daemon/start.go b/daemon/start.go index 57a7267b7cbb6..bc66bd7ed8543 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -10,6 +10,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/mount" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -82,7 +83,11 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos // check if hostConfig is in line with the current 
system settings. // It may happen cgroups are umounted or the like. - if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil { + // TODO(containerd): Fill in from container configuration + platform := ocispec.Platform{ + OS: container.OS, + } + if _, err = daemon.verifyContainerSettings(platform, container.HostConfig, nil, false); err != nil { return errdefs.InvalidParameter(err) } // Adapt for old containers in case we have updates in this function and diff --git a/daemon/update.go b/daemon/update.go index 0ebb139d3dafc..667ed6afee7b4 100644 --- a/daemon/update.go +++ b/daemon/update.go @@ -6,6 +6,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -18,7 +19,11 @@ func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostCon return container.ContainerUpdateOKBody{Warnings: warnings}, err } - warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) + // TODO(containerd): get whole platform for container + platform := ocispec.Platform{ + OS: c.OS, + } + warnings, err = daemon.verifyContainerSettings(platform, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } From 9b2eb7a5e0e9436beafab1df8b8f65e4be7a6469 Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Mon, 25 Feb 2019 15:22:29 -0800 Subject: [PATCH 26/73] unblock layer extracting to a separate thread Signed-off-by: Anda Xu --- daemon/images/image_pull.go | 128 +++++++++++++++++++----------------- 1 file changed, 68 insertions(+), 60 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index b57eeb8eaf5d5..33651ea70304d 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -24,7 +24,7 @@ import ( "github.com/docker/docker/pkg/stringid" 
"github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -34,7 +34,7 @@ const defaultMaxConcurrentDownloads = 3 // PullImage initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. -func (i *ImageService) PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (i *ImageService) PullImage(ctx context.Context, image, tag string, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { start := time.Now() // Special case: "pull -a" may send an image name with a // trailing :. This is ugly, but let's not break API @@ -65,7 +65,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor return err } -func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { c, err := i.getCache(ctx) if err != nil { return err @@ -82,7 +82,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference close(progress) }() - h := images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { + h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { if desc.MediaType != images.MediaTypeDockerSchema1Manifest { ongoing.add(desc) } @@ -90,60 +90,43 @@ 
func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference }) var ( - l layer.Layer - layers []specs.Descriptor - dlStatus = map[digest.Digest]bool{} - dlChan = make(chan digest.Digest, 5) - unpackChan = make(chan struct{}) + l layer.Layer + layers = map[digest.Digest][]ocispec.Descriptor{} + dlStatus = map[digest.Digest]bool{} + delayed = true // delayed unpack flag for schema 1 + lock = sync.Mutex{} + cond = sync.NewCond(&lock) + unpackErr = make(chan error) ) // unpackHandler handles layer unpacking concurrently as soon as - // a layer in order has been downloaded + // a layer has been downloaded in order unpackHandler := func(h images.Handler) images.Handler { - return images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { - logrus.Infof("parent desc -> type=%s id=%s", desc.MediaType, stringid.TruncateID(desc.Digest.String())) + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { children, err := h.Handle(ctx, desc) if err != nil { return children, err } - // manifest downloaded - if len(children) > 1 { - // trim off config descriptor only keep the layer descriptors - layers = children[1:] - // start the message broker to track layer downloading status - go func() { - // check the layers downloading status according to - // the order from manifest so that the unpacking - // process will be signaled in the same order as well. - // Also buffer the layers which downloaded faster - // ahead of the order. 
- for i := 0; i < len(layers); { - if ok := dlStatus[layers[i].Digest]; ok { - unpackChan <- struct{}{} - i++ - continue - } - select { - case d := <-dlChan: - if d == layers[i].Digest { - unpackChan <- struct{}{} - i++ - } - dlStatus[d] = true - } - } - }() - } switch desc.MediaType { - case images.MediaTypeDockerSchema2Config: - // handle unpack - l, err = i.unpack(pctx, desc, layers, progressOutput, unpackChan) - if err != nil { - return nil, errors.Wrapf(err, "failed to unpack %s", desc.Digest) - } - case images.MediaTypeDockerSchema2LayerGzip: + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + lock.Lock() + // map the config to layers + layers[children[0].Digest] = children[1:] + lock.Unlock() + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + // handle schema2 unpack concurrently + delayed = false + go func() { + l, err = i.unpack(pctx, desc, layers[desc.Digest], progressOutput, cond, dlStatus) + unpackErr <- errors.Wrapf(err, "failed to unpack %s", desc.Digest) + }() + case images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer: // a layer has been downloaded, signal downloaded status - dlChan <- desc.Digest + lock.Lock() + dlStatus[desc.Digest] = true + lock.Unlock() + cond.Broadcast() } return children, nil @@ -170,7 +153,24 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } config, err := img.Config(pctx) if err != nil { - return errors.Wrap(err, "failed to resolve configuration") + return errors.Wrap(err, "failed to pull image") + } + // delayed unpacking for schema 1 + if delayed { + l, err = i.unpack(pctx, config, layers[config.Digest], progressOutput, nil, nil) + if err != nil { + return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) + } + } else { + // wait schema2 unpack to finish + select { + case <-pctx.Done(): + return errors.New("pull context cancelled") + case 
err = <-unpackErr: + if err != nil { + return err + } + } } // TODO: Unpack into layer store @@ -212,7 +212,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO: Add shallow pull function which returns descriptor -func (i *ImageService) unpack(ctx context.Context, config specs.Descriptor, layers []specs.Descriptor, progressOutput progress.Output, unpackChan chan struct{}) (layer.Layer, error) { +func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor, progressOutput progress.Output, cond *sync.Cond, status map[digest.Digest]bool) (layer.Layer, error) { var ( cs = i.client.ContentStore() ls = i.layerStores[runtime.GOOS] @@ -232,8 +232,16 @@ func (i *ImageService) unpack(ctx context.Context, config specs.Descriptor, laye ) for d := range diffIDs { chain = append(chain, diffIDs[d]) - // start unpacking upon signaled after current layer downloading complete - <-unpackChan + // start extracting upon signaled after current layer downloading complete + // otherwise wait upon the resource is ready + if cond != nil && status != nil { + cond.L.Lock() + for !status[layers[d].Digest] { + cond.Wait() + } + cond.L.Unlock() + } + nl, err := i.applyLayer(ctx, layers[d], chain, progressOutput) if err != nil { logrus.Errorf("apply layer failed -> %s", err) @@ -270,7 +278,7 @@ func (i *ImageService) unpack(ctx context.Context, config specs.Descriptor, laye return l, nil } -func (i *ImageService) applyLayer(ctx context.Context, blob specs.Descriptor, layers []digest.Digest, progressOutput progress.Output) (layer.Layer, error) { +func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, layers []digest.Digest, progressOutput progress.Output) (layer.Layer, error) { var ( cs = i.client.ContentStore() ls = i.layerStores[runtime.GOOS] @@ -429,12 +437,12 @@ type StatusInfo struct { Status string } -func isLayer(desc specs.Descriptor) bool { +func isLayer(desc ocispec.Descriptor) 
bool { switch desc.MediaType { case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, - specs.MediaTypeImageLayer, specs.MediaTypeImageLayerGzip, - specs.MediaTypeImageLayerNonDistributable, specs.MediaTypeImageLayerNonDistributableGzip: + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, + ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: return true default: return false @@ -449,7 +457,7 @@ func isLayer(desc specs.Descriptor) bool { type jobs struct { name string added map[digest.Digest]struct{} - descs []specs.Descriptor + descs []ocispec.Descriptor mu sync.Mutex } @@ -460,7 +468,7 @@ func newJobs(name string) *jobs { } } -func (j *jobs) add(desc specs.Descriptor) { +func (j *jobs) add(desc ocispec.Descriptor) { j.mu.Lock() defer j.mu.Unlock() @@ -471,10 +479,10 @@ func (j *jobs) add(desc specs.Descriptor) { j.added[desc.Digest] = struct{}{} } -func (j *jobs) jobs() []specs.Descriptor { +func (j *jobs) jobs() []ocispec.Descriptor { j.mu.Lock() defer j.mu.Unlock() - var descs []specs.Descriptor + var descs []ocispec.Descriptor return append(descs, j.descs...) 
} From b9868747ba9c29d730b0e39e171bd1785962606f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 26 Feb 2019 14:36:34 -0800 Subject: [PATCH 27/73] Move layer creation to daemon package Signed-off-by: Derek McGowan --- daemon/create.go | 51 ++++++++++++++-------- daemon/images/image_delete.go | 1 + daemon/images/image_pull.go | 4 +- daemon/images/service.go | 80 ++++++----------------------------- 4 files changed, 48 insertions(+), 88 deletions(-) diff --git a/daemon/create.go b/daemon/create.go index 6a700904114eb..294a9d967e083 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -9,6 +9,7 @@ import ( "strings" "time" + cimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -16,9 +17,12 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/images" "github.com/docker/docker/errdefs" + "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -114,11 +118,6 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con // Create creates a new container from the given configuration with a given name. 
func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *container.Container, retErr error) { - var ( - container *container.Container - err error - ) - if err := daemon.mergeAndVerifyConfig(ctx, opts.params.Config, opts.rImage.ConfigBytes); err != nil { return nil, errdefs.InvalidParameter(err) } @@ -127,8 +126,8 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai return nil, errdefs.InvalidParameter(err) } - // TODO(containerd): Move this before OS and image check, remove desc and os from here - if container, err = daemon.newContainer(opts.params.Name, opts.params.Config, opts.params.HostConfig, opts.rImage, opts.managed); err != nil { + container, err := daemon.newContainer(opts.params.Name, opts.params.Config, opts.params.HostConfig, opts.rImage, opts.managed) + if err != nil { return nil, err } defer func() { @@ -162,20 +161,10 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai } } - // Set RWLayer for container after mount labels have been set - createOpts := []images.CreateLayerOpt{ - images.WithLayerImage(opts.rImage.Config), - images.WithLayerContainer(container), - images.WithLayerInit(setupInitLayer(daemon.idMapping)), - // TODO(containerd): pass in platform - } - - // TODO(containerd): return non layer type - rwLayer, err := daemon.imageService.CreateLayer(ctx, createOpts...) 
+ container.RWLayer, err = daemon.createRWLayer(ctx, opts.rImage, container) if err != nil { return nil, errdefs.System(err) } - container.RWLayer = rwLayer rootIDs := daemon.idMapping.RootPair() @@ -212,6 +201,32 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai return container, nil } +func (daemon *Daemon) createRWLayer(ctx context.Context, img images.RuntimeImage, container *container.Container) (layer.RWLayer, error) { + var chainID digest.Digest + if img.Config.Digest != "" { + cs := daemon.containerdCli.ContentStore() + diffIDs, err := cimages.RootFS(ctx, cs, img.Config) + if err != nil { + return nil, errors.Wrap(err, "failed to resolve rootfs") + } + + chainID = identity.ChainID(diffIDs) + } + + ls, err := daemon.imageService.GetImageBackend(img) + if err != nil { + return nil, err + } + + rwLayerOpts := &layer.CreateRWLayerOpts{ + MountLabel: container.MountLabel, + StorageOpt: container.HostConfig.StorageOpt, + InitFunc: setupInitLayer(daemon.idMapping), + } + + return ls.CreateRWLayer(container.ID, layer.ChainID(chainID), rwLayerOpts) +} + func toHostConfigSelinuxLabels(labels []string) []string { for i, l := range labels { labels[i] = "label=" + l diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 1acaf420478ed..95fb2dbb6e6c4 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -56,6 +56,7 @@ const ( // Soft Conflict: // - any stopped container using the image. // - any repository tag or digest references to the image. +// - TODO(containerd): has label "io.cri-containerd.image==managed" // // The image cannot be removed if there are any hard conflicts and can be // removed if there are soft conflicts only if force is true. 
diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 33651ea70304d..320e115a649c7 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -187,7 +187,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference if ll != nil { metadata, err := i.layerStores[runtime.GOOS].Release(ll) if err != nil { - return errors.Wrap(err, "failed to release layer") + return errors.Wrap(err, "failed to release already retained layer") } layer.LogReleaseMetadata(metadata) } @@ -252,7 +252,7 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la if l != nil { metadata, err := ls.Release(l) if err != nil { - return nil, errors.Wrap(err, "failed to release layer") + return nil, errors.Wrap(err, "failed to release layer after apply") } layer.LogReleaseMetadata(metadata) } diff --git a/daemon/images/service.go b/daemon/images/service.go index 0f851a1f2518f..d5d9cdc66a4b7 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -21,7 +21,6 @@ import ( "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -149,77 +148,22 @@ func (i *ImageService) ChildrenByID(ctx context.Context, id digest.Digest) ([]di return ci.children, nil } -type createLayerOptions struct { - id string - image ocispec.Descriptor - container *container.Container - initFunc layer.MountInit -} - -type CreateLayerOpt func(*createLayerOptions) - -func WithLayerID(id string) CreateLayerOpt { - return func(o *createLayerOptions) { - o.id = id - } -} - -func WithLayerContainer(container *container.Container) CreateLayerOpt { - return func(o *createLayerOptions) { - o.container = container - } -} - -func WithLayerImage(config ocispec.Descriptor) CreateLayerOpt { - return func(o *createLayerOptions) { - o.image = config +// 
GetImageBackend returns the storage backend used by the given image +// TODO(containerd): return more abstract interface to support snapshotters +func (i *ImageService) GetImageBackend(image RuntimeImage) (layer.Store, error) { + if image.Config.Digest != "" { + // TODO(containerd): Get from content-store label + // TODO(containerd): Lookup by layer store names } -} - -func WithLayerInit(initFunc layer.MountInit) CreateLayerOpt { - return func(o *createLayerOptions) { - o.initFunc = initFunc - } -} - -// CreateLayer creates a filesystem layer for a container. -// called from create.go -func (i *ImageService) CreateLayer(ctx context.Context, opts ...CreateLayerOpt) (layer.RWLayer, error) { - var options createLayerOptions - for _, opt := range opts { - opt(&options) + if image.Platform.OS == "" { + image.Platform = platforms.DefaultSpec() } - - var chainID digest.Digest - if options.image.Digest != "" { - diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), options.image) - if err != nil { - return nil, errors.Wrap(err, "failed to resolve rootfs") - } - - chainID = identity.ChainID(diffIDs) - } - - rwLayerOpts := &layer.CreateRWLayerOpts{ - InitFunc: options.initFunc, - } - - if options.container != nil { - rwLayerOpts.MountLabel = options.container.MountLabel - rwLayerOpts.StorageOpt = options.container.HostConfig.StorageOpt - if options.id == "" { - options.id = options.container.ID - } - } - - if options.id == "" { - return nil, errors.New("no layer id provided") + ls, ok := i.layerStores[image.Platform.OS] + if !ok { + return nil, errdefs.Unavailable(errors.Errorf("no storage backend configured for %s", image.Platform.OS)) } - // Indexing by OS is safe here as validation of OS has already been performed in create() (the only - // caller), and guaranteed non-nil - // TODO(containerd): resolve through descriptor - return i.layerStores[runtime.GOOS].CreateRWLayer(options.id, layer.ChainID(chainID), rwLayerOpts) + return ls, nil } // GetLayerByID returns a 
layer by ID and operating system From fdc89269a81f2805cfed7cddb5108f65c09597f2 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 28 Feb 2019 16:26:03 -0800 Subject: [PATCH 28/73] Support for multiple layer stores Make layer stores multiplatform and allow configuring multiple ordered by preference. Signed-off-by: Derek McGowan --- daemon/daemon.go | 81 +++++++++------ daemon/images/cache.go | 89 +++++++++++----- daemon/images/image_commit.go | 22 ++-- daemon/images/image_delete.go | 6 ++ daemon/images/image_pull.go | 127 ++++++++++++----------- daemon/images/images.go | 9 +- daemon/images/service.go | 190 ++++++++++++++++++++++++---------- 7 files changed, 339 insertions(+), 185 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index aa0435d2673d1..9e1e62d2dddee 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/dialer" + "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" @@ -43,6 +44,7 @@ import ( "github.com/docker/docker/errdefs" "github.com/moby/buildkit/util/resolver" "github.com/moby/buildkit/util/tracing" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" // register graph drivers @@ -847,29 +849,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } } - // On Windows we don't support the environment variable, or a user supplied graphdriver - // as Windows has no choice in terms of which graphdrivers to use. It's a case of - // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, - // lcow. 
Unix platforms however run a single graphdriver for all containers, and it can - // be set through an environment variable, a daemon start parameter, or chosen through - // initialization of the layerstore through driver priority order for example. - d.graphDrivers = make(map[string]string) - layerStores := make(map[string]layer.Store) - if runtime.GOOS == "windows" { - d.graphDrivers[runtime.GOOS] = "windowsfilter" - if system.LCOWSupported() { - d.graphDrivers["linux"] = "lcow" - } - } else { - driverName := os.Getenv("DOCKER_DRIVER") - if driverName == "" { - driverName = config.GraphDriver - } else { - logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) - } - d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead. - } - d.RegistryService = registryService logger.RegisterPluginGetter(d.PluginStore) @@ -929,23 +908,65 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - for operatingSystem, gd := range d.graphDrivers { - layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{ + type storageDriver struct { + platform ocispec.Platform + name string + } + var storageDrivers []storageDriver + + if runtime.GOOS == "windows" { + // On Windows we don't support the environment variable, or a user supplied graphdriver + // as Windows has no choice in terms of which graphdrivers to use. It's a case of + // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, + // lcow. Unix platforms however run a single graphdriver for all containers, and it can + // be set through an environment variable, a daemon start parameter, or chosen through + // initialization of the layerstore through driver priority order for example. 
+ p := platforms.DefaultSpec() + storageDrivers = append(storageDrivers, storageDriver{p, "windowsfilter"}) + if system.LCOWSupported() { + p.OS = "linux" + p.OSVersion = "" + p.OSFeatures = nil + storageDrivers = append([]storageDriver{{p, "lcow"}}, storageDrivers...) + + } + } else { + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } else { + logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) + } + storageDrivers = append(storageDrivers, storageDriver{platforms.DefaultSpec(), driverName}) + + // TODO(containerd): probe system for additional configured graph drivers + } + + layerStores := make(map[string]layer.Store) + + var backends []images.LayerBackend + d.graphDrivers = make(map[string]string) + for _, driver := range storageDrivers { + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ Root: config.Root, MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: gd, + GraphDriver: driver.name, GraphDriverOptions: config.GraphOptions, IDMapping: idMapping, PluginGetter: d.PluginStore, ExperimentalEnabled: config.Experimental, - OS: operatingSystem, + OS: driver.platform.OS, }) if err != nil { return nil, err } // As layerstore initialization may set the driver - d.graphDrivers[operatingSystem] = layerStores[operatingSystem].DriverName() + d.graphDrivers[driver.platform.OS] = ls.DriverName() + backends = append(backends, images.LayerBackend{ + Store: ls, + Platform: platforms.Any(driver.platform), + }) } // Configure and validate the kernels security support. Note this is a Linux/FreeBSD @@ -1035,12 +1056,14 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // used above to run migration. They could be initialized in ImageService // if migration is called from daemon/images. layerStore might move as well. 
d.imageService = images.NewImageService(images.ImageServiceConfig{ + DefaultNamespace: ContainersNamespace, + DefaultPlatform: storageDrivers[0].platform, Client: d.containerdCli, ContainerStore: d.containers, DistributionMetadataStore: distributionMetadataStore, EventsService: d.EventsService, ImageStore: imageStore, - LayerStores: layerStores, + LayerBackends: backends, MaxConcurrentDownloads: *config.MaxConcurrentDownloads, MaxConcurrentUploads: *config.MaxConcurrentUploads, ReferenceStore: rs, diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 05c4e59e5acf0..6087d6ba9177c 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -2,9 +2,10 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" - "runtime" + "fmt" "sync" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" @@ -15,7 +16,6 @@ import ( buildcache "github.com/docker/docker/image/cache" "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -36,14 +36,18 @@ type cachedImage struct { } type cache struct { - m sync.RWMutex - // idCache maps Docker identifiers - idCache map[digest.Digest]*cachedImage - // tCache maps target digests to images - tCache map[digest.Digest]*cachedImage + m sync.RWMutex ids *digestset.Set targets *digestset.Set descriptors map[digest.Digest]ocispec.Descriptor + layers map[string]map[digest.Digest]layer.Layer + + // idCache maps Docker identifiers + // deprecated + idCache map[digest.Digest]*cachedImage + // tCache maps target digests to images + // deprecated + tCache map[digest.Digest]*cachedImage } func (c *cache) byID(id digest.Digest) *cachedImage { @@ -75,6 +79,7 @@ func (i *ImageService) LoadCache(ctx context.Context) error { if err 
!= nil { return err } + log.G(ctx).WithField("namespace", namespace).Debugf("loading cache") _, err = i.loadNSCache(ctx, namespace) return err @@ -84,15 +89,46 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach i.cacheL.Lock() defer i.cacheL.Unlock() - c := &cache{ - idCache: map[digest.Digest]*cachedImage{}, - tCache: map[digest.Digest]*cachedImage{}, - ids: digestset.NewSet(), - targets: digestset.NewSet(), - descriptors: map[digest.Digest]ocispec.Descriptor{}, - } + var ( + cs = i.client.ContentStore() + is = i.client.ImageService() + c = &cache{ + targets: digestset.NewSet(), + descriptors: map[digest.Digest]ocispec.Descriptor{}, + layers: map[string]map[digest.Digest]layer.Layer{}, - is := i.client.ImageService() + // Deprecated + ids: digestset.NewSet(), + idCache: map[digest.Digest]*cachedImage{}, + tCache: map[digest.Digest]*cachedImage{}, + } + ) + + // Load layers + for _, backend := range i.layerBackends { + backendCache := map[digest.Digest]layer.Layer{} + name := backend.DriverName() + label := fmt.Sprintf("%s%s", LabelLayerPrefix, name) + err := cs.Walk(ctx, func(info content.Info) error { + value := digest.Digest(info.Labels[label]) + if _, ok := backendCache[value]; ok { + return nil + } + l, err := backend.Get(layer.ChainID(value)) + if err != nil { + log.G(ctx).WithError(err).WithField("digest", info.Digest).WithField("driver", name).Warnf("unable to get layer") + } else { + log.G(ctx).WithField("digest", info.Digest).WithField("driver", name).Debugf("retaining layer %s", value) + backendCache[value] = l + } + return nil + }, fmt.Sprintf("labels.%q", label)) + if err != nil { + return nil, err + } + + c.layers[name] = backendCache + } // TODO(containerd): This must use some streaming approach imgs, err := is.List(ctx) @@ -174,17 +210,18 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach ci.parent = pid } } - diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), ci.config) - if 
err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("unable to load image rootfs") - continue - } - // TODO(containerd): choose correct platform - ci.layer, err = i.layerStores[runtime.GOOS].Get(layer.ChainID(identity.ChainID(diffIDs))) - if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("no layer for image") - continue - } + //diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), ci.config) + //if err != nil { + // log.G(ctx).WithError(err).WithField("name", img.Name).Debug("unable to load image rootfs") + // continue + //} + + //// TODO(containerd): choose correct platform + //ci.layer, err = i.backends[0].Get(layer.ChainID(identity.ChainID(diffIDs))) + //if err != nil { + // log.G(ctx).WithError(err).WithField("name", img.Name).Debug("no layer for image") + // continue + //} c.idCache[id.Digest] = ci c.ids.Add(id.Digest) diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 805968d75a169..3baf82f37bc33 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -18,7 +18,6 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -69,9 +68,10 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) } } - layerStore, ok := i.layerStores[c.ContainerOS] - if !ok { - return ocispec.Descriptor{}, system.ErrNotSupportedOperatingSystem + // TODO(containerd): get from container metadata + layerStore, err := i.getLayerStoreByOS(c.ContainerOS) + if err != nil { + return ocispec.Descriptor{}, err } rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) if err != nil { @@ -152,6 +152,7 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) 
CreatedAt: created, UpdatedAt: created, Labels: map[string]string{ + // TODO(containerd): name can be used to determine this LabelImageDangling: desc.Digest.String(), }, }) @@ -161,11 +162,19 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) } cache.m.Lock() + layerKey := digest.Digest(l.ChainID()) + if _, ok := cache.layers[layerStore.DriverName()][layerKey]; !ok { + cache.layers[layerStore.DriverName()][layerKey] = l + } else { + // Image already retained, don't hold onto layer + defer layer.ReleaseAndLog(layerStore, l) + } + + // TODO(containerd): remove this, no longer used if _, ok := cache.idCache[desc.Digest]; !ok { ci := &cachedImage{ config: desc, parent: digest.Digest(c.ParentImageID), - layer: l, } cache.idCache[desc.Digest] = ci @@ -180,9 +189,6 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) pci.m.Unlock() } } - } else { - // Image already exists, don't hold onto layer - defer layer.ReleaseAndLog(layerStore, l) } cache.m.Unlock() diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 95fb2dbb6e6c4..bae14e9959236 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -340,6 +340,8 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, } log.G(ctx).Debugf("%s: removing references", img.config.Digest) + // TODO(containerd): Get list of configs and ChainIDs + // Delete all repository tag/digest references to this image. 
records, err := i.removeImageRefs(ctx, img, repoRefs, true) if err != nil { @@ -349,6 +351,10 @@ func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, i.LogImageEvent(ctx, img.config.Digest.String(), img.config.Digest.String(), "delete") records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) + // TODO(containerd): lock cache + // TODO(containerd): get all cached layers for chain ids + // TODO(containerd): for each layer, check current containerd namespace for reference + // TODO(containerd): Snapshot integration will obsolete this section, // containerd's garbage collector can own the removal of the layer if img.layer != nil { diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 320e115a649c7..9b8b55fd71991 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -2,10 +2,10 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "encoding/json" "fmt" "io" "io/ioutil" - "runtime" "strings" "sync" "time" @@ -14,6 +14,8 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" @@ -66,11 +68,6 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor } func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - c, err := i.getCache(ctx) - if err != nil { - return err - } - progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) ongoing := newJobs(ref.Name()) pctx, stopProgress := context.WithCancel(ctx) @@ -90,12 +87,13 @@ func (i *ImageService) 
pullImageWithReference(ctx context.Context, ref reference }) var ( - l layer.Layer - layers = map[digest.Digest][]ocispec.Descriptor{} - dlStatus = map[digest.Digest]bool{} - delayed = true // delayed unpack flag for schema 1 - lock = sync.Mutex{} - cond = sync.NewCond(&lock) + l layer.Layer + layers = map[digest.Digest][]ocispec.Descriptor{} + dlStatus = map[digest.Digest]bool{} + delayed = true // delayed unpack flag for schema 1 + lock = sync.Mutex{} + cond = sync.NewCond(&lock) + // TODO(containerd): replace this with errgroup unpackErr = make(chan error) ) // unpackHandler handles layer unpacking concurrently as soon as @@ -157,10 +155,14 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } // delayed unpacking for schema 1 if delayed { + // TODO(containerd): Just resolve config here + l, err = i.unpack(pctx, config, layers[config.Digest], progressOutput, nil, nil) if err != nil { return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) } + + // TODO(containerd): cache that layer? } else { // wait schema2 unpack to finish select { @@ -173,59 +175,46 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } } - // TODO: Unpack into layer store - // TODO: only unpack image types (does containerd already do this?) 
- - // TODO: Update image with ID label - // TODO(containerd): Create manifest reference and add image - - c.m.Lock() - ci, ok := c.idCache[config.Digest] - if ok { - ll := ci.layer - ci.layer = l - if ll != nil { - metadata, err := i.layerStores[runtime.GOOS].Release(ll) - if err != nil { - return errors.Wrap(err, "failed to release already retained layer") - } - layer.LogReleaseMetadata(metadata) - } - - ci.addReference(ref) - // TODO: Add manifest digest ref - } else { - ci = &cachedImage{ - config: config, - references: []reference.Named{ref}, - layer: l, - } - c.idCache[config.Digest] = ci - } - c.tCache[img.Target().Digest] = ci - c.m.Unlock() stopProgress() <-progress return err } -// TODO: Add shallow pull function which returns descriptor - func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor, progressOutput progress.Output, cond *sync.Cond, status map[digest.Digest]bool) (layer.Layer, error) { - var ( - cs = i.client.ContentStore() - ls = i.layerStores[runtime.GOOS] - ) + c, err := i.getCache(ctx) + if err != nil { + return nil, err + } - diffIDs, err := images.RootFS(ctx, cs, config) + cs := i.client.ContentStore() + p, err := content.ReadBlob(ctx, cs, config) if err != nil { - return nil, errors.Wrap(err, "failed to resolve rootfs") + return nil, err + } + + var cfg struct { + ocispec.Platform + + // RootFS references the layer content addresses used by the image. 
+ RootFS ocispec.RootFS `json:"rootfs"` + } + + if err := json.Unmarshal(p, &cfg); err != nil { + return nil, errors.Wrap(err, "failed to parse config") } + + diffIDs := cfg.RootFS.DiffIDs if len(diffIDs) != len(layers) { return nil, errors.Errorf("mismatched image rootfs and manifest layers") } + // Resolve layerstore + ls, err := i.getLayerStore(platforms.Normalize(cfg.Platform)) + if err != nil { + return nil, err + } + var ( chain = []digest.Digest{} l layer.Layer @@ -242,9 +231,10 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la cond.L.Unlock() } - nl, err := i.applyLayer(ctx, layers[d], chain, progressOutput) + nl, err := i.applyLayer(ctx, ls, layers[d], chain, progressOutput) if err != nil { - logrus.Errorf("apply layer failed -> %s", err) + log.G(ctx).WithError(err).Errorf("apply layer failed") + layer.ReleaseAndLog(ls, l) return nil, errors.Wrapf(err, "failed to apply layer %d", d) } logrus.Debugf("Layer applied: chain=%s %s (%s)", nl.ChainID(), nl.DiffID(), diffIDs[d]) @@ -252,12 +242,16 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la if l != nil { metadata, err := ls.Release(l) if err != nil { + layer.ReleaseAndLog(ls, nl) return nil, errors.Wrap(err, "failed to release layer after apply") } layer.LogReleaseMetadata(metadata) } - // TODO(containerd): verify diff ID + if digest.Digest(l.DiffID()) != diffIDs[d] { + layer.ReleaseAndLog(ls, nl) + return nil, errors.Errorf("invalid diff id %s, expected %s", l.DiffID(), diffIDs[d]) + } l = nl } @@ -275,15 +269,28 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la return nil, errors.Wrap(err, "failed to update image config label") } + c.m.Lock() + blayers, ok := c.layers[ls.DriverName()] + if !ok { + blayers = map[digest.Digest]layer.Layer{} + c.layers[ls.DriverName()] = blayers + } + if ll, ok := blayers[digest.Digest(l.ChainID())]; ok { + metadata, err := ls.Release(ll) + if err != nil { + 
log.G(ctx).WithError(err).WithField("driver", ls.DriverName()).WithField("name", string(ll.ChainID())).Errorf("failed to release retained layer") + } else { + layer.LogReleaseMetadata(metadata) + } + } + blayers[digest.Digest(l.ChainID())] = l + c.m.Unlock() + return l, nil } -func (i *ImageService) applyLayer(ctx context.Context, blob ocispec.Descriptor, layers []digest.Digest, progressOutput progress.Output) (layer.Layer, error) { - var ( - cs = i.client.ContentStore() - ls = i.layerStores[runtime.GOOS] - ) - +func (i *ImageService) applyLayer(ctx context.Context, ls layer.Store, blob ocispec.Descriptor, layers []digest.Digest, progressOutput progress.Output) (layer.Layer, error) { + cs := i.client.ContentStore() l, err := ls.Get(layer.ChainID(identity.ChainID(layers))) if err == nil { return l, nil diff --git a/daemon/images/images.go b/daemon/images/images.go index 741d83fe75b65..136a84558536c 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -45,11 +45,6 @@ func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } // named all controls whether all images in the graph are filtered, or just // the heads. 
func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { - c, err := i.getCache(ctx) - if err != nil { - return nil, err - } - if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { return nil, err } @@ -65,7 +60,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al cs := i.client.ContentStore() var beforeFilter, sinceFilter *time.Time - err = imageFilters.WalkValues("before", func(value string) error { + err := imageFilters.WalkValues("before", func(value string) error { img, err := i.ResolveImage(ctx, value) if err != nil { return err @@ -140,7 +135,6 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al m := map[digest.Digest][]images.Image{} created := map[digest.Digest]time.Time{} - c.m.RLock() for _, img := range allImages { info, err := cs.Info(ctx, img.Target.Digest) if err != nil { @@ -228,7 +222,6 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al //images = append(images, newImage) } - c.m.RUnlock() imageSums := []*types.ImageSummary{} //var layerRefs map[layer.ChainID]int diff --git a/daemon/images/service.go b/daemon/images/service.go index d5d9cdc66a4b7..04f37f3a88793 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -2,11 +2,12 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "fmt" "os" - "runtime" "sync" "github.com/containerd/containerd" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/docker/docker/container" @@ -17,10 +18,12 @@ import ( "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" dockerreference "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" 
"github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -34,53 +37,102 @@ type containerStore interface { Get(string) *container.Container } +type LayerBackend struct { + layer.Store + Platform platforms.Matcher +} + // ImageServiceConfig is the configuration used to create a new ImageService type ImageServiceConfig struct { DefaultNamespace string + DefaultPlatform ocispec.Platform Client *containerd.Client ContainerStore containerStore DistributionMetadataStore metadata.Store EventsService *daemonevents.Events - ImageStore image.Store - LayerStores map[string]layer.Store + LayerBackends []LayerBackend MaxConcurrentDownloads int MaxConcurrentUploads int - ReferenceStore dockerreference.Store - RegistryService registry.Service + + // deprecated + ImageStore image.Store + // deprecated + ReferenceStore dockerreference.Store + // deprecated + RegistryService registry.Service } // NewImageService returns a new ImageService from a configuration func NewImageService(config ImageServiceConfig) *ImageService { logrus.Debugf("Max Concurrent Downloads: %d", config.MaxConcurrentDownloads) logrus.Debugf("Max Concurrent Uploads: %d", config.MaxConcurrentUploads) + + var pc orderedPlatformComparer + layerStores := map[string]layer.Store{} + for _, backend := range config.LayerBackends { + pc.matchers = append(pc.matchers, backend.Platform) + layerStores[backend.DriverName()] = backend.Store + } + // TODO(containerd): Store backends by name and ordered return &ImageService{ - namespace: config.DefaultNamespace, - client: config.Client, - containers: config.ContainerStore, + namespace: config.DefaultNamespace, + defaultPlatform: config.DefaultPlatform, + platforms: pc, + client: config.Client, + containers: config.ContainerStore, + cache: map[string]*cache{}, + eventsService: config.EventsService, + layerBackends: config.LayerBackends, + layerStores: layerStores, + 
distributionMetadataStore: config.DistributionMetadataStore, - downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), - cache: map[string]*cache{}, - eventsService: config.EventsService, imageStore: config.ImageStore, - layerStores: config.LayerStores, referenceStore: config.ReferenceStore, registryService: config.RegistryService, - uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), + //downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), + //uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), + } +} + +// TODO(containerd): add upstream constructor +type orderedPlatformComparer struct { + matchers []platforms.Matcher +} + +func (c orderedPlatformComparer) Match(platform ocispec.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} - // TODO(containerd): derive from configured layerstores - platforms: platforms.Ordered(platforms.DefaultSpec()), +func (c orderedPlatformComparer) Less(p1 ocispec.Platform, p2 ocispec.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } } + return false } // ImageService provides a backend for image management type ImageService struct { - namespace string - client *containerd.Client - containers containerStore - eventsService *daemonevents.Events - layerStores map[string]layer.Store // By operating system - platforms platforms.MatchComparer - pruneRunning int32 + namespace string + defaultPlatform ocispec.Platform + client *containerd.Client + containers containerStore + eventsService *daemonevents.Events + layerStores map[string]layer.Store + layerBackends []LayerBackend + platforms platforms.MatchComparer + pruneRunning int32 // namespaced cache cache map[string]*cache @@ -105,11 +157,12 @@ type DistributionServices struct { } // 
DistributionServices return services controlling daemon image storage +// deprecated: use containerd client func (i *ImageService) DistributionServices() DistributionServices { return DistributionServices{ DownloadManager: i.downloadManager, V2MetadataService: metadata.NewV2MetadataService(i.distributionMetadataStore), - LayerStore: i.layerStores[runtime.GOOS], + LayerStore: i.layerBackends[0].Store, ImageStore: i.imageStore, ReferenceStore: i.referenceStore, } @@ -118,34 +171,30 @@ func (i *ImageService) DistributionServices() DistributionServices { // CountImages returns the number of images stored by ImageService // called from info.go func (i *ImageService) CountImages(ctx context.Context) (int, error) { - c, err := i.getCache(ctx) + is := i.client.ImageService() + imgs, err := is.List(ctx) if err != nil { return 0, err } - c.m.RLock() - l := len(c.idCache) - c.m.RUnlock() - - return l, nil + return len(imgs), nil } // ChildrenByID returns the children image digests for a parent image. 
// called from list.go to filter containers func (i *ImageService) ChildrenByID(ctx context.Context, id digest.Digest) ([]digest.Digest, error) { - c, err := i.getCache(ctx) + cs := i.client.ContentStore() + + var children []digest.Digest + err := cs.Walk(ctx, func(info content.Info) error { + children = append(children, info.Digest) + return nil + }, fmt.Sprintf("labels.%q==%s", LabelImageParent, id.String())) if err != nil { return nil, err } - c.m.RLock() - ci, ok := c.idCache[id] - c.m.RUnlock() - if !ok { - return nil, nil - } - - return ci.children, nil + return children, nil } // GetImageBackend returns the storage backend used by the given image @@ -156,28 +205,48 @@ func (i *ImageService) GetImageBackend(image RuntimeImage) (layer.Store, error) // TODO(containerd): Lookup by layer store names } if image.Platform.OS == "" { - image.Platform = platforms.DefaultSpec() + image.Platform = i.defaultPlatform + } + for _, backend := range i.layerBackends { + if backend.Platform.Match(image.Platform) { + return backend.Store, nil + } } - ls, ok := i.layerStores[image.Platform.OS] - if !ok { - return nil, errdefs.Unavailable(errors.Errorf("no storage backend configured for %s", image.Platform.OS)) + + return nil, errdefs.System(errors.Wrapf(system.ErrNotSupportedOperatingSystem, "no layer storage backend configured for %s", image.Platform.OS)) +} + +func (i *ImageService) getLayerStore(platform ocispec.Platform) (layer.Store, error) { + for _, backend := range i.layerBackends { + if backend.Platform.Match(platform) { + return backend.Store, nil + } } - return ls, nil + return nil, errdefs.Unavailable(errors.Errorf("no layer storage backend configured for %s", platform.OS)) +} + +func (i *ImageService) getLayerStoreByOS(os string) (layer.Store, error) { + return i.getLayerStore(ocispec.Platform{OS: os}) } // GetLayerByID returns a layer by ID and operating system // called from daemon.go Daemon.restore(), and Daemon.containerExport() func (i *ImageService) 
GetLayerByID(cid string, os string) (layer.RWLayer, error) { - return i.layerStores[os].GetRWLayer(cid) + ls, err := i.getLayerStoreByOS(os) + if err != nil { + return nil, err + } + + return ls.GetRWLayer(cid) } // LayerStoreStatus returns the status for each layer store // called from info.go func (i *ImageService) LayerStoreStatus() map[string][][2]string { result := make(map[string][][2]string) - for os, store := range i.layerStores { - result[os] = store.DriverStatus() + for _, backend := range i.layerBackends { + result[backend.DriverName()] = backend.DriverStatus() } return result } @@ -187,17 +256,20 @@ func (i *ImageService) LayerStoreStatus() map[string][][2]string { // TODO: needs to be refactored to Unmount (see callers), or removed and replaced // with GetLayerByID func (i *ImageService) GetLayerMountID(cid string, os string) (string, error) { - return i.layerStores[os].GetMountID(cid) + ls, err := i.getLayerStoreByOS(os) + if err != nil { + return "", err + } + + return ls.GetMountID(cid) } // Cleanup resources before the process is shutdown. 
// called from daemon.go Daemon.Shutdown() func (i *ImageService) Cleanup() { - for os, ls := range i.layerStores { - if ls != nil { - if err := ls.Cleanup(); err != nil { - logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os) - } + for _, backend := range i.layerBackends { + if err := backend.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, backend.DriverName()) } } } @@ -207,13 +279,23 @@ func (i *ImageService) Cleanup() { // - newContainer // - to report an error in Daemon.Mount(container) func (i *ImageService) GraphDriverForOS(os string) string { - return i.layerStores[os].DriverName() + ls, err := i.getLayerStoreByOS(os) + if err != nil { + // TODO(containerd): more graceful return is possible + panic(err) + } + + return ls.DriverName() } // ReleaseLayer releases a layer allowing it to be removed // called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, containerOS string) error { - metadata, err := i.layerStores[containerOS].ReleaseRWLayer(rwlayer) + ls, err := i.getLayerStoreByOS(containerOS) + if err != nil { + return err + } + metadata, err := ls.ReleaseRWLayer(rwlayer) layer.LogReleaseMetadata(metadata) if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) { return errors.Wrapf(err, "driver %q failed to remove root filesystem", From 1b9f56171a8640b585ec7cf80c9db2f91a00da87 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Sun, 3 Mar 2019 23:33:42 -0800 Subject: [PATCH 29/73] Update pull to use error groups Support for multiple unpacks Signed-off-by: Derek McGowan --- daemon/images/image_pull.go | 114 ++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 9b8b55fd71991..31bd3a5cd6d54 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -8,6 +8,7 @@ 
import ( "io/ioutil" "strings" "sync" + "sync/atomic" "time" "github.com/containerd/containerd" @@ -29,6 +30,7 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" ) // default maximum concurrent downloads allowed during docker pull @@ -87,15 +89,14 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference }) var ( - l layer.Layer - layers = map[digest.Digest][]ocispec.Descriptor{} - dlStatus = map[digest.Digest]bool{} - delayed = true // delayed unpack flag for schema 1 - lock = sync.Mutex{} - cond = sync.NewCond(&lock) - // TODO(containerd): replace this with errgroup - unpackErr = make(chan error) + layers = map[digest.Digest][]ocispec.Descriptor{} + dlStatus = map[digest.Digest]bool{} + unpacks int32 = 0 // how many unpacks occurred + lock = sync.Mutex{} + cond = sync.NewCond(&lock) ) + grp, pctx := errgroup.WithContext(pctx) + // unpackHandler handles layer unpacking concurrently as soon as // a layer has been downloaded in order unpackHandler := func(h images.Handler) images.Handler { @@ -107,17 +108,19 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - lock.Lock() - // map the config to layers - layers[children[0].Digest] = children[1:] - lock.Unlock() + // TODO(container): remove layer children if not on a configured platform + if len(children) > 1 { + lock.Lock() + // map the config to layers + layers[children[0].Digest] = children[1:] + lock.Unlock() + } case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - // handle schema2 unpack concurrently - delayed = false - go func() { - l, err = i.unpack(pctx, desc, layers[desc.Digest], progressOutput, cond, dlStatus) - unpackErr <- errors.Wrapf(err, "failed to unpack %s", desc.Digest) - }() + // TODO(containerd): only start unpack if 
on a configured platform + atomic.AddInt32(&unpacks, 1) + grp.Go(func() error { + return i.unpack(pctx, desc, layers[desc.Digest], progressOutput, cond, dlStatus) + }) case images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Layer, ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer: // a layer has been downloaded, signal downloaded status @@ -131,47 +134,44 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference }) } - opts := []containerd.RemoteOpt{ - containerd.WithImageHandler(h), - containerd.WithImageHandlerWrapper(unpackHandler), - containerd.WithMaxConcurrentDownloads(defaultMaxConcurrentDownloads), + pctx, done, err := i.client.WithLease(pctx) + if err != nil { + return err } - // TODO: Lease - // TODO: Custom resolver + defer done(pctx) + + // TODO(containerd): Custom resolver // - Auth config // - Custom headers - // TODO: Platforms using `platform` + // TODO(containerd): Platforms using a passed in `platform` // TODO(containerd): progress tracking - // TODO: unpack tracking, use download manager for now? + opts := []containerd.RemoteOpt{ + containerd.WithImageHandler(h), + containerd.WithImageHandlerWrapper(unpackHandler), + containerd.WithMaxConcurrentDownloads(defaultMaxConcurrentDownloads), + } img, err := i.client.Pull(pctx, ref.String(), opts...) 
if err != nil { return errors.Wrap(err, "failed to pull image") } - config, err := img.Config(pctx) - if err != nil { - return errors.Wrap(err, "failed to pull image") - } - // delayed unpacking for schema 1 - if delayed { - // TODO(containerd): Just resolve config here - l, err = i.unpack(pctx, config, layers[config.Digest], progressOutput, nil, nil) + if unpacks > 0 { + if err := grp.Wait(); err != nil { + return err + } + } else { + // try to resolve config to unpack if none was done previously + // schema 1 must be resolved and unpacked after pull + config, err := img.Config(pctx) if err != nil { - return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) + return errors.Wrap(err, "failed to resolve image config for unpack") } - // TODO(containerd): cache that layer? - } else { - // wait schema2 unpack to finish - select { - case <-pctx.Done(): - return errors.New("pull context cancelled") - case err = <-unpackErr: - if err != nil { - return err - } + err = i.unpack(pctx, config, layers[config.Digest], progressOutput, nil, nil) + if err != nil { + return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) } } @@ -181,16 +181,16 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return err } -func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor, progressOutput progress.Output, cond *sync.Cond, status map[digest.Digest]bool) (layer.Layer, error) { +func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor, progressOutput progress.Output, cond *sync.Cond, status map[digest.Digest]bool) error { c, err := i.getCache(ctx) if err != nil { - return nil, err + return err } cs := i.client.ContentStore() p, err := content.ReadBlob(ctx, cs, config) if err != nil { - return nil, err + return err } var cfg struct { @@ -201,18 +201,18 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la } if 
err := json.Unmarshal(p, &cfg); err != nil { - return nil, errors.Wrap(err, "failed to parse config") + return errors.Wrap(err, "failed to parse config") } diffIDs := cfg.RootFS.DiffIDs if len(diffIDs) != len(layers) { - return nil, errors.Errorf("mismatched image rootfs and manifest layers") + return errors.Errorf("mismatched image rootfs and manifest layers") } // Resolve layerstore ls, err := i.getLayerStore(platforms.Normalize(cfg.Platform)) if err != nil { - return nil, err + return err } var ( @@ -235,7 +235,7 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la if err != nil { log.G(ctx).WithError(err).Errorf("apply layer failed") layer.ReleaseAndLog(ls, l) - return nil, errors.Wrapf(err, "failed to apply layer %d", d) + return errors.Wrapf(err, "failed to apply layer %d", d) } logrus.Debugf("Layer applied: chain=%s %s (%s)", nl.ChainID(), nl.DiffID(), diffIDs[d]) @@ -243,14 +243,14 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la metadata, err := ls.Release(l) if err != nil { layer.ReleaseAndLog(ls, nl) - return nil, errors.Wrap(err, "failed to release layer after apply") + return errors.Wrap(err, "failed to release layer after apply") } layer.LogReleaseMetadata(metadata) } - if digest.Digest(l.DiffID()) != diffIDs[d] { + if digest.Digest(nl.DiffID()) != diffIDs[d] { layer.ReleaseAndLog(ls, nl) - return nil, errors.Errorf("invalid diff id %s, expected %s", l.DiffID(), diffIDs[d]) + return errors.Errorf("invalid diff id %s, expected %s", nl.DiffID(), diffIDs[d]) } l = nl @@ -266,7 +266,7 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la if _, err := cs.Update(ctx, info, "labels."+key); err != nil { layer.ReleaseAndLog(ls, l) - return nil, errors.Wrap(err, "failed to update image config label") + return errors.Wrap(err, "failed to update image config label") } c.m.Lock() @@ -286,7 +286,7 @@ func (i *ImageService) unpack(ctx context.Context, config 
ocispec.Descriptor, la blayers[digest.Digest(l.ChainID())] = l c.m.Unlock() - return l, nil + return nil } func (i *ImageService) applyLayer(ctx context.Context, ls layer.Store, blob ocispec.Descriptor, layers []digest.Digest, progressOutput progress.Output) (layer.Layer, error) { From ba80e45e402e0242446a47eff89d5839561a73a1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 4 Mar 2019 16:18:56 -0800 Subject: [PATCH 30/73] Lookup images by digest regexp Filter pull artifacts by platform while pulling all manifests and configs. Signed-off-by: Derek McGowan --- daemon/commit.go | 2 + daemon/images/cache.go | 3 -- daemon/images/image.go | 42 ++++++++++++++----- daemon/images/image_commit.go | 1 + daemon/images/image_inspect.go | 73 +++++++++++++++++++++------------- daemon/images/image_pull.go | 62 ++++++++++++++++++++--------- daemon/images/image_tag.go | 3 +- daemon/images/images.go | 5 ++- 8 files changed, 128 insertions(+), 63 deletions(-) diff --git a/daemon/commit.go b/daemon/commit.go index ba6950f27e458..62ca39fa03301 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -171,6 +171,8 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, var imageRef string if c.Repo != "" { + // TODO(containerd): Pass this into commit image? 
+ // Commit image must retain the image through a name or `none@` tag imageRef, err = daemon.imageService.TagImage(ctx, string(desc.Digest), c.Repo, c.Tag) if err != nil { return "", err diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 6087d6ba9177c..f12435f6c78a6 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -38,7 +38,6 @@ type cachedImage struct { type cache struct { m sync.RWMutex ids *digestset.Set - targets *digestset.Set descriptors map[digest.Digest]ocispec.Descriptor layers map[string]map[digest.Digest]layer.Layer @@ -93,7 +92,6 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach cs = i.client.ContentStore() is = i.client.ImageService() c = &cache{ - targets: digestset.NewSet(), descriptors: map[digest.Digest]ocispec.Descriptor{}, layers: map[string]map[digest.Digest]layer.Layer{}, @@ -227,7 +225,6 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach c.ids.Add(id.Digest) } c.tCache[img.Target.Digest] = ci - c.targets.Add(img.Target.Digest) c.descriptors[img.Target.Digest] = img.Target // Load image layer to prevent removal diff --git a/daemon/images/image.go b/daemon/images/image.go index 9df3a3b8f63ff..7f65a3eb941e7 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -30,6 +30,9 @@ const ( // LabelImageDangling refers to images with no name // Stored on images and points to the image config digest + // TODO(containerd): Deprecate this, use name@hash approach + // to hold onto images and avoid calculating the dangling + // property after every retag LabelImageDangling = "docker.io/image.dangling" // LabelLayerPrefix is used as the label prefix for layer stores @@ -106,21 +109,40 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe return imgs[0].Target, nil } - img, err := is.Get(ctx, namedRef.String()) - if err != nil { - if !cerrdefs.IsNotFound(err) { - return ocispec.Descriptor{}, err + // TODO(containerd): 
If namedRef matches COULD be interpreted as a + // digest prefer, do a lookup via `is.List` instead + // with an or clause + // TODO(containerd): Ensure named only + ref := namedRef.String() + if len(refOrID) < 64 { + filters := []string{ + fmt.Sprintf("name==%q", namedRef.String()), + fmt.Sprintf(`target.digest~="sha256:%s[0-9a-fA-F]{%d}"`, refOrID, 64-len(refOrID)), } - dgst, err := c.targets.Lookup(refOrID) + imgs, err := is.List(ctx, filters...) if err != nil { - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("reference not found")) + return ocispec.Descriptor{}, err } - - desc, ok := c.descriptors[dgst] - if ok { - return desc, nil + if len(imgs) == 1 { + return imgs[0].Target, nil + } + if len(imgs) == 0 { + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("list returned no images")) + } + for _, img := range imgs { + if img.Name == ref { + return img.Target, nil + } } + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("ambiguous reference")) + } + img, err := is.Get(ctx, namedRef.String()) + if err != nil { + // TODO(containerd): Translate error directly + if !cerrdefs.IsNotFound(err) { + return ocispec.Descriptor{}, err + } return ocispec.Descriptor{}, errdefs.NotFound(errors.New("id not found")) } diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 3baf82f37bc33..928b869aa366b 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -147,6 +147,7 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) // Create a dangling image _, err = i.client.ImageService().Create(ctx, images.Image{ + // TODO(containerd): Add a name component here Name: desc.Digest.String(), Target: desc, CreatedAt: created, diff --git a/daemon/images/image_inspect.go b/daemon/images/image_inspect.go index 78607f2e52c98..5399f1de159af 100644 --- a/daemon/images/image_inspect.go +++ b/daemon/images/image_inspect.go @@ -3,15 +3,15 @@ package images // import 
"github.com/docker/docker/daemon/images" import ( "context" "encoding/json" - "runtime" "time" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" containertype "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/layer" "github.com/docker/go-connections/nat" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -21,25 +21,41 @@ import ( // LookupImage looks up an image by name and returns it as an ImageInspect // structure. func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.ImageInspect, error) { - ci, err := i.getCachedRef(ctx, name) + desc, err := i.ResolveImage(ctx, name) if err != nil { return nil, err } repoTags := []string{} repoDigests := []string{} - for _, ref := range ci.references { + imgs, err := i.client.ImageService().List(ctx, "target.digest=="+desc.Digest.String()) + if err != nil { + return nil, err + } + for _, img := range imgs { + // Parse name + ref, err := reference.Parse(img.Name) + if err != nil { + log.G(ctx).WithError(err).WithField("target", desc.Digest.String()).Warnf("skipping reference %q", img.Name) + continue + } switch ref.(type) { - case reference.NamedTagged: - repoTags = append(repoTags, reference.FamiliarString(ref)) - // TODO(containerd): these references may need to come from - // metadata used for cross repository push case reference.Canonical: repoDigests = append(repoDigests, reference.FamiliarString(ref)) + case reference.NamedTagged: + repoTags = append(repoTags, reference.FamiliarString(ref)) } } - p, err := content.ReadBlob(ctx, i.client.ContentStore(), ci.config) + cs := i.client.ContentStore() + + config, err := images.Config(ctx, cs, desc, i.platforms) + if err != nil { + log.G(ctx).WithError(err).Debugf("resolve 
failed") + return nil, errors.Wrap(err, "failed to resolve config") + } + + p, err := content.ReadBlob(ctx, cs, config) if err != nil { return nil, errors.Wrap(err, "failed to read config") } @@ -66,20 +82,21 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima var layerMetadata map[string]string layerID := identity.ChainID(img.RootFS.DiffIDs) if layerID != "" { - l, err := i.layerStores[runtime.GOOS].Get(layer.ChainID(layerID)) - if err != nil { - return nil, err - } - defer layer.ReleaseAndLog(i.layerStores[runtime.GOOS], l) - size, err = l.Size() - if err != nil { - return nil, err - } - - layerMetadata, err = l.Metadata() - if err != nil { - return nil, err - } + // Read layer store from labels + //l, err := i.layerStores[runtime.GOOS].Get(layer.ChainID(layerID)) + //if err != nil { + // return nil, err + //} + //defer layer.ReleaseAndLog(i.layerStores[runtime.GOOS], l) + //size, err = l.Size() + //if err != nil { + // return nil, err + //} + + //layerMetadata, err = l.Metadata() + //if err != nil { + // return nil, err + //} } comment := img.Comment @@ -94,10 +111,10 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima //} imageInspect := &types.ImageInspect{ - ID: ci.config.Digest.String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: ci.parent.String(), + ID: desc.Digest.String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + //Parent: ci.parent.String(), Comment: comment, Created: img.Created.Format(time.RFC3339Nano), DockerVersion: img.DockerVersion, @@ -115,7 +132,7 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima //}, } - imageInspect.GraphDriver.Name = i.layerStores[runtime.GOOS].DriverName() + //imageInspect.GraphDriver.Name = i.layerStores[runtime.GOOS].DriverName() imageInspect.GraphDriver.Data = layerMetadata return imageInspect, nil diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 
31bd3a5cd6d54..b072f1c0374b0 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -89,11 +89,12 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference }) var ( - layers = map[digest.Digest][]ocispec.Descriptor{} - dlStatus = map[digest.Digest]bool{} - unpacks int32 = 0 // how many unpacks occurred - lock = sync.Mutex{} - cond = sync.NewCond(&lock) + layers = map[digest.Digest][]ocispec.Descriptor{} + dlStatus = map[digest.Digest]bool{} + unpackDesc = map[digest.Digest]struct{}{} + unpacks int32 = 0 // how many unpacks occurred + lock = sync.Mutex{} + cond = sync.NewCond(&lock) ) grp, pctx := errgroup.WithContext(pctx) @@ -107,20 +108,41 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } switch desc.MediaType { + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + lock.Lock() + var unknown []ocispec.Descriptor + for _, d := range children { + if d.Platform == nil { + unknown = append(unknown, d) + } else if i.platforms.Match(*d.Platform) { + unpackDesc[d.Digest] = struct{}{} + } + } + if len(unpackDesc) == 0 && len(unknown) > 0 { + unpackDesc[unknown[0].Digest] = struct{}{} + } + lock.Unlock() case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - // TODO(container): remove layer children if not on a configured platform + lock.Lock() + if _, ok := unpackDesc[desc.Digest]; !ok { + children = children[:1] + + } if len(children) > 1 { - lock.Lock() // map the config to layers layers[children[0].Digest] = children[1:] - lock.Unlock() } + lock.Unlock() case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - // TODO(containerd): only start unpack if on a configured platform - atomic.AddInt32(&unpacks, 1) - grp.Go(func() error { - return i.unpack(pctx, desc, layers[desc.Digest], progressOutput, cond, dlStatus) - }) + lock.Lock() + unpackLayers := layers[desc.Digest] + lock.Unlock() + if len(unpackLayers) > 0 { + 
atomic.AddInt32(&unpacks, 1) + grp.Go(func() error { + return i.unpack(pctx, desc, unpackLayers, progressOutput, cond, dlStatus) + }) + } case images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Layer, ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer: // a layer has been downloaded, signal downloaded status @@ -144,15 +166,13 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference // TODO(containerd): Custom resolver // - Auth config // - Custom headers - // TODO(containerd): Platforms using a passed in `platform` - // TODO(containerd): progress tracking opts := []containerd.RemoteOpt{ containerd.WithImageHandler(h), containerd.WithImageHandlerWrapper(unpackHandler), containerd.WithMaxConcurrentDownloads(defaultMaxConcurrentDownloads), } - img, err := i.client.Pull(pctx, ref.String(), opts...) + img, err := i.client.Fetch(pctx, ref.String(), opts...) if err != nil { return errors.Wrap(err, "failed to pull image") } @@ -164,17 +184,19 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } else { // try to resolve config to unpack if none was done previously // schema 1 must be resolved and unpacked after pull - config, err := img.Config(pctx) + config, err := img.Config(pctx, i.client.ContentStore(), i.platforms) if err != nil { return errors.Wrap(err, "failed to resolve image config for unpack") } err = i.unpack(pctx, config, layers[config.Digest], progressOutput, nil, nil) if err != nil { - return errors.Wrapf(err, "failed to unpack %s", img.Target().Digest) + return errors.Wrapf(err, "failed to unpack %s", img.Target.Digest) } } + // TODO(containerd): Tag name@hash to hold for dangling image case + stopProgress() <-progress @@ -234,7 +256,9 @@ func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, la nl, err := i.applyLayer(ctx, ls, layers[d], chain, progressOutput) if err != nil { log.G(ctx).WithError(err).Errorf("apply layer failed") - 
layer.ReleaseAndLog(ls, l) + if l != nil { + layer.ReleaseAndLog(ls, l) + } return errors.Wrapf(err, "failed to apply layer %d", d) } logrus.Debugf("Layer applied: chain=%s %s (%s)", nl.ChainID(), nl.DiffID(), diffIDs[d]) diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 1a71c310f32d8..8dfc70dc92885 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -34,6 +34,7 @@ func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag } img.name = newTag + // TODO(containerd): Also add image for name@digest err = i.tagImage(ctx, img) return reference.FamiliarString(newTag), err } @@ -48,7 +49,7 @@ func (i *ImageService) TagImageWithReference(ctx context.Context, target ocispec if ci == nil { return errdefs.NotFound(errors.New("target not found")) } - + // TODO(containerd): Also add image for name@digest return i.tagImage(ctx, imageLink{ name: newTag, target: &target, diff --git a/daemon/images/images.go b/daemon/images/images.go index 136a84558536c..7e721d09ff5e8 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -238,11 +238,12 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al switch target.MediaType { case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + config = target default: - // TODO(containerd): Set this more globally and to - // an appropriate value for Windows + // TODO(containerd): use global platforms matcher platform := platforms.Default() + // TODO(containerd): config matcher which ignores NotFound items? 
desc, err := images.Config(ctx, cs, imgs[0].Target, platform) if err != nil { log.G(ctx).WithError(err).WithField("image", dgst.String()).Warnf("unable to resolve config") From 705fc4f7e493b27f709091797e0b6d439b206875 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 4 Mar 2019 16:53:12 -0800 Subject: [PATCH 31/73] Use content label for inspecting layer store Signed-off-by: Derek McGowan --- daemon/images/image_inspect.go | 90 +++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 39 deletions(-) diff --git a/daemon/images/image_inspect.go b/daemon/images/image_inspect.go index 5399f1de159af..c6674f8cf358b 100644 --- a/daemon/images/image_inspect.go +++ b/daemon/images/image_inspect.go @@ -3,6 +3,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" "encoding/json" + "strings" "time" "github.com/containerd/containerd/content" @@ -12,6 +13,7 @@ import ( "github.com/docker/docker/api/types" containertype "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/layer" "github.com/docker/go-connections/nat" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -26,6 +28,7 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima return nil, err } + lastUpdated := time.Unix(0, 0) repoTags := []string{} repoDigests := []string{} imgs, err := i.client.ImageService().List(ctx, "target.digest=="+desc.Digest.String()) @@ -45,16 +48,25 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima case reference.NamedTagged: repoTags = append(repoTags, reference.FamiliarString(ref)) } + if img.UpdatedAt.After(lastUpdated) { + lastUpdated = img.UpdatedAt + } } cs := i.client.ContentStore() config, err := images.Config(ctx, cs, desc, i.platforms) if err != nil { - log.G(ctx).WithError(err).Debugf("resolve failed") + // TODO(containerd): handle case where 
config fails to resume + // due to missing data caused by multiple matches return nil, errors.Wrap(err, "failed to resolve config") } + info, err := cs.Info(ctx, config.Digest) + if err != nil { + return nil, errors.Wrap(err, "failed to get config info") + } + p, err := content.ReadBlob(ctx, cs, config) if err != nil { return nil, errors.Wrap(err, "failed to read config") @@ -80,23 +92,31 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima var size int64 var layerMetadata map[string]string - layerID := identity.ChainID(img.RootFS.DiffIDs) - if layerID != "" { - // Read layer store from labels - //l, err := i.layerStores[runtime.GOOS].Get(layer.ChainID(layerID)) - //if err != nil { - // return nil, err - //} - //defer layer.ReleaseAndLog(i.layerStores[runtime.GOOS], l) - //size, err = l.Size() - //if err != nil { - // return nil, err - //} - - //layerMetadata, err = l.Metadata() - //if err != nil { - // return nil, err - //} + var lsname string + if layerID := identity.ChainID(img.RootFS.DiffIDs); layerID != "" { + for k, v := range info.Labels { + if strings.HasPrefix(k, LabelLayerPrefix) && v == string(layerID) { + lsname = k[len(LabelLayerPrefix):] + } else { + log.G(ctx).Debugf("not the label: %q => %q", k, v) + } + } + if ls, ok := i.layerStores[lsname]; ok { + l, err := ls.Get(layer.ChainID(layerID)) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(ls, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } } comment := img.Comment @@ -104,17 +124,11 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima comment = img.History[len(img.History)-1].Comment } - // TODO(containerd): Get from label? 
- //lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) - //if err != nil { - // return nil, err - //} - - imageInspect := &types.ImageInspect{ - ID: desc.Digest.String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - //Parent: ci.parent.String(), + return &types.ImageInspect{ + ID: desc.Digest.String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: info.Labels[LabelImageParent], Comment: comment, Created: img.Created.Format(time.RFC3339Nano), DockerVersion: img.DockerVersion, @@ -126,16 +140,14 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima Size: size, VirtualSize: size, // TODO: field unused, deprecate RootFS: rootFSToAPIType(img.RootFS), - // TODO(containerd): Get from labels? - //Metadata: types.ImageMetadata{ - // LastTagTime: lastUpdated, - //}, - } - - //imageInspect.GraphDriver.Name = i.layerStores[runtime.GOOS].DriverName() - imageInspect.GraphDriver.Data = layerMetadata - - return imageInspect, nil + Metadata: types.ImageMetadata{ + LastTagTime: lastUpdated, + }, + GraphDriver: types.GraphDriverData{ + Name: lsname, + Data: layerMetadata, + }, + }, nil } func rootFSToAPIType(rootfs ocispec.RootFS) types.RootFS { From 84dfd57c76a1dc920c9b09898bca791ba451ec1c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 4 Mar 2019 17:09:35 -0800 Subject: [PATCH 32/73] Update pull to add canonical tag Update tag to avoid deprecate cache Signed-off-by: Derek McGowan --- daemon/images/image_pull.go | 11 +++++++++- daemon/images/image_tag.go | 40 ++++++++----------------------------- 2 files changed, 18 insertions(+), 33 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index b072f1c0374b0..0e3d4ccd365f9 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -195,7 +195,16 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } } - // TODO(containerd): Tag name@hash to hold for dangling image case + c, err := 
reference.WithDigest(ref, img.Target.Digest) + if err != nil { + return errors.Wrap(err, "failed to create digest ref") + } + + img.Name = c.String() + _, err = i.client.ImageService().Create(ctx, img) + if err != nil { + return errors.Wrap(err, "failed to save canonical image") + } stopProgress() <-progress diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 8dfc70dc92885..e8ca8ff676f13 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -5,7 +5,6 @@ import ( "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" - "github.com/docker/docker/errdefs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -13,16 +12,11 @@ import ( // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) { - img, err := i.getImageByRef(ctx, imageName) + desc, err := i.ResolveImage(ctx, imageName) if err != nil { return "", err } - if img.target == nil { - // TODO(containerd): Choose a better target based on other references? - img.target = &img.cached.config - } - newTag, err := reference.ParseNormalizedNamed(repository) if err != nil { return "", err @@ -32,43 +26,25 @@ func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag return "", err } } - img.name = newTag - // TODO(containerd): Also add image for name@digest - err = i.tagImage(ctx, img) + err = i.TagImageWithReference(ctx, desc, newTag) return reference.FamiliarString(newTag), err } // TagImageWithReference adds the given reference to the image ID provided. 
func (i *ImageService) TagImageWithReference(ctx context.Context, target ocispec.Descriptor, newTag reference.Named) error { - c, err := i.getCache(ctx) - if err != nil { - return err - } - ci := c.byTarget(target.Digest) - if ci == nil { - return errdefs.NotFound(errors.New("target not found")) - } - // TODO(containerd): Also add image for name@digest - return i.tagImage(ctx, imageLink{ - name: newTag, - target: &target, - cached: ci, - }) -} - -func (i *ImageService) tagImage(ctx context.Context, img imageLink) error { im := images.Image{ - Name: img.name.String(), - Target: *img.target, + Name: newTag.String(), + Target: target, } + is := i.client.ImageService() _, err := is.Create(ctx, im) if err != nil { return errors.Wrap(err, "failed to create image") } - // TODO(containerd): Set last updated for target - i.LogImageEvent(ctx, img.target.Digest.String(), reference.FamiliarString(img.name), "tag") - return i.updateCache(ctx, img) + i.LogImageEvent(ctx, target.Digest.String(), reference.FamiliarString(newTag), "tag") + + return nil } From 501622f5817a1ca03b0b86952ec8ff4e7d71f64c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 11 Mar 2019 14:46:16 -0700 Subject: [PATCH 33/73] Add test framework which sets up containerd Add initial test for listing images Signed-off-by: Derek McGowan --- daemon/images/generators_test.go | 185 ++++++ daemon/images/images.go | 35 +- daemon/images/images_test.go | 206 ++++++ daemon/images/service.go | 19 +- daemon/images/service_test.go | 249 +++++++ daemon/images/service_unix_test.go | 45 ++ .../containerd/containerd/diff/apply/apply.go | 128 ++++ .../containerd/diff/walking/differ.go | 181 ++++++ .../containerd/diff/walking/plugin/plugin.go | 55 ++ .../containerd/gc/scheduler/scheduler.go | 356 ++++++++++ .../containerd/services/containers/helpers.go | 70 ++ .../containerd/services/containers/local.go | 243 +++++++ .../containerd/services/containers/service.go | 109 ++++ .../containerd/services/diff/local.go | 179 ++++++ 
.../containerd/services/diff/service.go | 71 ++ .../containerd/services/diff/service_unix.go | 23 + .../services/diff/service_windows.go | 23 + .../containerd/services/images/helpers.go | 70 ++ .../containerd/services/images/local.go | 180 ++++++ .../containerd/services/images/service.go | 84 +++ .../containerd/services/leases/local.go | 109 ++++ .../containerd/services/leases/service.go | 122 ++++ .../containerd/services/namespaces/local.go | 223 +++++++ .../containerd/services/namespaces/service.go | 84 +++ .../containerd/services/server/server.go | 394 ++++++++++++ .../services/server/server_linux.go | 55 ++ .../services/server/server_solaris.go | 27 + .../services/server/server_unsupported.go | 29 + .../services/server/server_windows.go | 29 + .../containerd/services/snapshots/service.go | 317 +++++++++ .../services/snapshots/snapshotters.go | 98 +++ .../containerd/snapshots/native/native.go | 348 ++++++++++ .../containerd/snapshots/storage/bolt.go | 606 ++++++++++++++++++ .../containerd/snapshots/storage/metastore.go | 115 ++++ 34 files changed, 5038 insertions(+), 29 deletions(-) create mode 100644 daemon/images/generators_test.go create mode 100644 daemon/images/images_test.go create mode 100644 daemon/images/service_test.go create mode 100644 daemon/images/service_unix_test.go create mode 100644 vendor/github.com/containerd/containerd/diff/apply/apply.go create mode 100644 vendor/github.com/containerd/containerd/diff/walking/differ.go create mode 100644 vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go create mode 100644 vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go create mode 100644 vendor/github.com/containerd/containerd/services/containers/helpers.go create mode 100644 vendor/github.com/containerd/containerd/services/containers/local.go create mode 100644 vendor/github.com/containerd/containerd/services/containers/service.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/local.go create 
mode 100644 vendor/github.com/containerd/containerd/services/diff/service.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/service_unix.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/service_windows.go create mode 100644 vendor/github.com/containerd/containerd/services/images/helpers.go create mode 100644 vendor/github.com/containerd/containerd/services/images/local.go create mode 100644 vendor/github.com/containerd/containerd/services/images/service.go create mode 100644 vendor/github.com/containerd/containerd/services/leases/local.go create mode 100644 vendor/github.com/containerd/containerd/services/leases/service.go create mode 100644 vendor/github.com/containerd/containerd/services/namespaces/local.go create mode 100644 vendor/github.com/containerd/containerd/services/namespaces/service.go create mode 100644 vendor/github.com/containerd/containerd/services/server/server.go create mode 100644 vendor/github.com/containerd/containerd/services/server/server_linux.go create mode 100644 vendor/github.com/containerd/containerd/services/server/server_solaris.go create mode 100644 vendor/github.com/containerd/containerd/services/server/server_unsupported.go create mode 100644 vendor/github.com/containerd/containerd/services/server/server_windows.go create mode 100644 vendor/github.com/containerd/containerd/services/snapshots/service.go create mode 100644 vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/native/native.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/storage/bolt.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/storage/metastore.go diff --git a/daemon/images/generators_test.go b/daemon/images/generators_test.go new file mode 100644 index 0000000000000..ebbcae6143b6c --- /dev/null +++ b/daemon/images/generators_test.go @@ -0,0 +1,185 @@ +package images + +import 
( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/archive/tartest" + "github.com/containerd/containerd/content" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/vmihailenco/bufio" +) + +type ingest func(context.Context, content.Store) error + +type construct func(*ocispec.Descriptor) ingest + +type configOpt func(*ocispec.Image) + +type manifestOpt func(*ocispec.Manifest) ingest + +func nilIngest(context.Context, content.Store) error { + return nil +} + +func errIngest(err error) ingest { + return func(context.Context, content.Store) error { + return err + } +} + +func multiIngest(ingests ...ingest) ingest { + return func(ctx context.Context, i content.Store) error { + for _, ing := range ingests { + if err := ing(ctx, i); err != nil { + return err + } + } + return nil + } +} + +func bytesIngest(p []byte, m string) ingest { + desc := ocispec.Descriptor{ + MediaType: m, + Digest: digest.FromBytes(p), + Size: int64(len(p)), + } + + return func(ctx context.Context, i content.Store) error { + return content.WriteBlob(ctx, i, desc.Digest.String(), bytes.NewReader(p), desc) + } +} + +func withRootFS(diffIDs ...digest.Digest) configOpt { + return func(i *ocispec.Image) { + i.RootFS.Type = "layers" + i.RootFS.DiffIDs = diffIDs + } +} + +func withConfig(opts ...configOpt) manifestOpt { + return func(m *ocispec.Manifest) ingest { + var diffIDs []digest.Digest + for _, l := range m.Layers { + if l.Annotations != nil { + if uncompressed, ok := l.Annotations["uncompressed"]; ok { + diffIDs = append(diffIDs, digest.Digest(uncompressed)) + } + } + } + // Add at beginning so any overriding RootFS is used + newopts := append([]configOpt{}, withRootFS(diffIDs...)) + + return createConfig(append(newopts, opts...)...)(&m.Config) + } +} + +// withLayers creates all the layers and adds them to the manifest +func 
withLayers(layers ...tartest.WriterToTar) manifestOpt { + return func(m *ocispec.Manifest) ingest { + var ingests []ingest + for _, l := range layers { + br := bufio.NewBuffer(nil) + dgstr := digest.Canonical.Digester() + cw, err := compression.CompressStream(br, compression.Gzip) + if err != nil { + return errIngest(err) + } + r := io.TeeReader(tartest.TarFromWriterTo(l), dgstr.Hash()) + if _, err := io.Copy(cw, r); err != nil { + return errIngest(err) + } + p := br.Bytes() + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayerGzip, + Digest: digest.FromBytes(p), + Size: int64(len(p)), + Annotations: map[string]string{ + "uncompressed": dgstr.Digest().String(), + }, + } + ingests = append(ingests, bytesIngest(p, desc.MediaType)) + m.Layers = append(m.Layers, desc) + } + + return multiIngest(ingests...) + } +} + +func createConfig(opts ...configOpt) construct { + var config ocispec.Image + for _, opt := range opts { + opt(&config) + } + return func(desc *ocispec.Descriptor) ingest { + p, err := json.Marshal(config) + if err != nil { + return errIngest(err) + } + *desc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.FromBytes(p), + Size: int64(len(p)), + } + return bytesIngest(p, desc.MediaType) + + } +} + +func createManifest(opts ...manifestOpt) construct { + var m ocispec.Manifest + var ingests []ingest + for _, opt := range opts { + ingests = append(ingests, opt(&m)) + } + + // strip annotations to match existing Docker behavior + // TODO(containerd): consider this as optional + for i := range m.Layers { + m.Layers[i].Annotations = nil + } + + return func(desc *ocispec.Descriptor) ingest { + p, err := json.Marshal(m) + if err != nil { + return errIngest(err) + } + *desc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageManifest, + Digest: digest.FromBytes(p), + Size: int64(len(p)), + } + return multiIngest(append(ingests, bytesIngest(p, desc.MediaType))...) 
+ + } +} + +// TODO(containerd): find a way to add annotations... +func createIndex(references ...construct) construct { + idx := ocispec.Index{ + Manifests: make([]ocispec.Descriptor, len(references)), + } + var ingests []ingest + for i, ref := range references { + ingests = append(ingests, ref(&idx.Manifests[i])) + } + + return func(desc *ocispec.Descriptor) ingest { + p, err := json.Marshal(idx) + if err != nil { + return errIngest(err) + } + *desc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageIndex, + Digest: digest.FromBytes(p), + Size: int64(len(p)), + } + return multiIngest(append(ingests, bytesIngest(p, desc.MediaType))...) + + } +} diff --git a/daemon/images/images.go b/daemon/images/images.go index 7e721d09ff5e8..be55f65636012 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -281,24 +281,15 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al tags := map[string]struct{}{} for _, img := range imgs { - if _, ok := img.Labels[LabelImageDangling]; !ok { - ref, err := reference.Parse(img.Name) - if err != nil { - continue - } - if named, ok := ref.(reference.Named); ok { - if c, ok := named.(reference.Canonical); ok { - digests[reference.FamiliarString(c)] = struct{}{} - } else if t, ok := named.(reference.Tagged); ok { - tags[reference.FamiliarString(t)] = struct{}{} - } - - switch img.Target.MediaType { - case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - // digest references only refer to manifests - default: - digests[reference.FamiliarName(named)+"@"+img.Target.Digest.String()] = struct{}{} - } + ref, err := reference.Parse(img.Name) + if err != nil { + continue + } + if named, ok := ref.(reference.Named); ok { + if c, ok := named.(reference.Canonical); ok { + digests[reference.FamiliarString(c)] = struct{}{} + } else if t, ok := named.(reference.Tagged); ok { + tags[reference.FamiliarString(t)] = struct{}{} } } } @@ -310,7 +301,7 @@ func (i *ImageService) Images(ctx 
context.Context, imageFilters filters.Args, al newImage.RepoTags = append(newImage.RepoTags, t) } - if len(newImage.RepoDigests) == 0 && len(newImage.RepoTags) == 0 { + if len(newImage.RepoTags) == 0 { // TODO(containerd): also skip if has children if !all { continue @@ -325,7 +316,11 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al continue } - newImage.RepoDigests = []string{"none@none"} + if len(newImage.RepoDigests) == 0 { + // TODO(containerd): Requires querying content store directly, + // not currently possible + newImage.RepoTags = []string{"none@none"} + } newImage.RepoTags = []string{"none:none"} } diff --git a/daemon/images/images_test.go b/daemon/images/images_test.go new file mode 100644 index 0000000000000..5e3acab9a1b7a --- /dev/null +++ b/daemon/images/images_test.go @@ -0,0 +1,206 @@ +package images + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + "github.com/containerd/containerd/archive/tartest" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func testListImages(ctx context.Context, t *testing.T, is *ImageService) { + tc := tartest.TarContext{}.WithModTime(time.Now().UTC()) + + type testImage struct { + names []string + image construct + // TODO(containerd): unpack + // TODO(containerd): parent index + } + type imageCheck func(*testing.T, *types.ImageSummary, []ocispec.Descriptor) + + type checkOpt func(*types.ImageSummary, []ocispec.Descriptor) + + withID := func(i int) checkOpt { + return func(s *types.ImageSummary, images []ocispec.Descriptor) { + s.ID = images[i].Digest.String() + } + } + + withTags := func(tags ...string) checkOpt { + return func(s *types.ImageSummary, images []ocispec.Descriptor) { + s.RepoTags = tags + } + } + + withDigests := func(i int, tags ...string) checkOpt { + return 
func(s *types.ImageSummary, images []ocispec.Descriptor) { + digests := make([]string, len(tags)) + for j := range tags { + digests[j] = fmt.Sprintf("%s@%s", tags[j], images[i].Digest.String()) + } + s.RepoDigests = digests + } + } + + check := func(opts ...checkOpt) imageCheck { + return func(t *testing.T, a *types.ImageSummary, images []ocispec.Descriptor) { + t.Helper() + + var e types.ImageSummary + for _, opt := range opts { + opt(&e, images) + } + + if e.ID != "" && e.ID != a.ID { + t.Errorf("unexpected id: expected %s, actual %s", e.ID, a.ID) + } + if e.RepoTags != nil && !reflect.DeepEqual(e.RepoTags, a.RepoTags) { + t.Errorf("unexpected tags:, expected %v, actual %v", e.RepoTags, a.RepoTags) + } + if e.RepoDigests != nil && !reflect.DeepEqual(e.RepoDigests, a.RepoDigests) { + t.Errorf("unexpected digests:, expected %v, actual %v", e.RepoDigests, a.RepoDigests) + } + } + } + + type testCase struct { + name string + images []testImage + expected []imageCheck + // TODO(containerd): filters, all, extra args + } + + for _, tcase := range []testCase{ + { + name: "SingleImageSingleTag", + images: []testImage{ + { + names: []string{"docker.io/library/someimage:latest"}, + image: createManifest( + withLayers( + tartest.TarAll( + tc.Dir("dummy", 0755), + tc.File("/dummy/file", []byte("unimportant"), 0644), + ), + ), + withConfig(), + ), + }, + }, + expected: []imageCheck{ + check(withID(0), withTags("someimage:latest"), withDigests(0, "someimage:latest")), + }, + }, + { + name: "MultiImageSingleTag", + images: []testImage{ + { + names: []string{"docker.io/library/someimage:latest"}, + image: createManifest( + withLayers( + tartest.TarAll( + tc.Dir("dummy", 0755), + tc.File("/dummy/file", []byte("unimportant"), 0644), + ), + ), + withConfig(), + ), + }, + { + names: []string{"docker.io/library/someimage:latest"}, + image: createManifest( + withLayers( + tartest.TarAll( + tc.Dir("dummy", 0755), + tc.File("/dummy/file", []byte("updated"), 0644), + ), + ), + 
withConfig(), + ), + }, + }, + expected: []imageCheck{ + check(withID(1), withTags("someimage:latest"), withDigests(1, "someimage:latest")), + }, + }, + } { + ctx, cleanup, err := is.client.WithLease(ctx) + if err != nil { + t.Fatal(err) + } + var created []string + t.Run(tcase.name, func(t *testing.T) { + var imgs []ocispec.Descriptor + cis := is.client.ImageService() + for _, imagec := range tcase.images { + var desc ocispec.Descriptor + if err := imagec.image(&desc)(ctx, is.client.ContentStore()); err != nil { + t.Fatal(err) + } + + for _, name := range imagec.names { + img := images.Image{ + Name: name, + Target: desc, + } + _, err = cis.Create(ctx, img) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + t.Fatal(err) + } + if _, err := cis.Update(ctx, img); err != nil { + t.Fatal(err) + } + } else { + created = append(created, img.Name) + } + + img.Name = img.Name + "@" + desc.Digest.String() + _, err = cis.Create(ctx, img) + if err != nil { + t.Fatal(err) + } + created = append(created, img.Name) + + } + // TODO(containerd): Unpack image? 
+ // TODO(containerd): Set parent + imgs = append(imgs, desc) + } + + listed, err := is.Images(ctx, filters.NewArgs(), false, false) + if err != nil { + t.Fatal(err) + } + + if len(listed) != len(tcase.expected) { + t.Fatalf("unexpected number of images: expected %d, actual %d", len(tcase.expected), len(listed)) + } + + for i := range listed { + tcase.expected[i](t, listed[i], imgs) + } + }) + if err := cleanup(ctx); err != nil { + t.Fatal(err) + } + cis := is.client.ImageService() + for i, name := range created { + var opts []images.DeleteOpt + if i == len(created)-1 { + opts = append(opts, images.SynchronousDelete()) + } + if err := cis.Delete(ctx, name, opts...); err != nil && !errdefs.IsNotFound(err) { + t.Fatal(err) + } + } + } +} diff --git a/daemon/images/service.go b/daemon/images/service.go index 04f37f3a88793..1af0dde7f4c94 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -44,16 +44,17 @@ type LayerBackend struct { // ImageServiceConfig is the configuration used to create a new ImageService type ImageServiceConfig struct { - DefaultNamespace string - DefaultPlatform ocispec.Platform - Client *containerd.Client - ContainerStore containerStore - DistributionMetadataStore metadata.Store - EventsService *daemonevents.Events - LayerBackends []LayerBackend - MaxConcurrentDownloads int - MaxConcurrentUploads int + DefaultNamespace string + DefaultPlatform ocispec.Platform + Client *containerd.Client + ContainerStore containerStore + EventsService *daemonevents.Events + LayerBackends []LayerBackend + MaxConcurrentDownloads int + MaxConcurrentUploads int + // deprecated + DistributionMetadataStore metadata.Store // deprecated ImageStore image.Store // deprecated diff --git a/daemon/images/service_test.go b/daemon/images/service_test.go new file mode 100644 index 0000000000000..558ec1e3d88a6 --- /dev/null +++ b/daemon/images/service_test.go @@ -0,0 +1,249 @@ +package images + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + 
"strings" + "sync" + "testing" + + "github.com/containerd/containerd" + containers "github.com/containerd/containerd/api/services/containers/v1" + diff "github.com/containerd/containerd/api/services/diff/v1" + imagessrv "github.com/containerd/containerd/api/services/images/v1" + namespacessrv "github.com/containerd/containerd/api/services/namespaces/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + "github.com/containerd/containerd/services/server" + srvconfig "github.com/containerd/containerd/services/server/config" + "github.com/containerd/containerd/snapshots" + "github.com/docker/docker/layer" + "github.com/pkg/errors" + + _ "github.com/containerd/containerd/diff/walking/plugin" + _ "github.com/containerd/containerd/gc/scheduler" + _ "github.com/containerd/containerd/services/containers" + _ "github.com/containerd/containerd/services/content" + _ "github.com/containerd/containerd/services/diff" + _ "github.com/containerd/containerd/services/images" + _ "github.com/containerd/containerd/services/leases" + _ "github.com/containerd/containerd/services/namespaces" + _ "github.com/containerd/containerd/services/snapshots" +) + +var ( + plugins []*plugin.Registration + pluginLoad sync.Once +) + +func loadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Registration, error) { + var err error + pluginLoad.Do(func() { + plugins, err = server.LoadPlugins(ctx, config) + }) + return plugins, err + +} + +func containerdServiceOpt(ctx context.Context, root string) (containerd.ClientOpt, error) { + config := srvconfig.Config{ + Root: filepath.Join(root, "root"), + State: filepath.Join(root, "state"), + } + + if err := 
os.MkdirAll(config.Root, 0711); err != nil { + return nil, err + } + if err := os.MkdirAll(config.State, 0711); err != nil { + return nil, err + } + plugins, err := loadPlugins(ctx, &config) + if err != nil { + return nil, err + } + + events := exchange.NewExchange() + initialized := plugin.NewPluginSet() + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := plugin.NewContext( + ctx, + p, + initialized, + config.Root, + config.State, + ) + initContext.Events = events + + // load the plugin specific configuration if it is provided + if p.Config != nil { + pluginConfig, err := config.Decode(p.ID, p.Config) + if err != nil { + return nil, err + } + initContext.Config = pluginConfig + } + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return nil, errors.Wrapf(err, "could not add plugin result to plugin set") + } + } + + initContext := plugin.NewContext( + ctx, + &plugin.Registration{ + Type: plugin.InternalPlugin, + ID: "unittest", + }, + initialized, + config.Root, + config.State, + ) + initContext.Events = events + + servicesOpts, err := getServicesOpts(initContext) + if err != nil { + return nil, err + } + + return containerd.WithServices(servicesOpts...), nil +} + +// getServicesOpts get service options from plugin context. 
+func getServicesOpts(ic *plugin.InitContext) ([]containerd.ServicesOpt, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, errors.Wrap(err, "failed to get service plugin") + } + + opts := []containerd.ServicesOpt{ + containerd.WithEventService(ic.Events), + } + for s, fn := range map[string]func(interface{}) containerd.ServicesOpt{ + services.ContentService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithContentStore(s.(content.Store)) + }, + services.ImagesService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithImageService(s.(imagessrv.ImagesClient)) + }, + services.SnapshotsService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithSnapshotters(s.(map[string]snapshots.Snapshotter)) + }, + services.ContainersService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithContainerService(s.(containers.ContainersClient)) + }, + //services.TasksService: func(s interface{}) containerd.ServicesOpt { + // return containerd.WithTaskService(s.(tasks.TasksClient)) + //}, + services.DiffService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithDiffService(s.(diff.DiffClient)) + }, + services.NamespacesService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithNamespaceService(s.(namespacessrv.NamespacesClient)) + }, + services.LeasesService: func(s interface{}) containerd.ServicesOpt { + return containerd.WithLeasesService(s.(leases.Manager)) + }, + } { + p := plugins[s] + if p == nil { + return nil, errors.Errorf("service %q not found", s) + } + i, err := p.Instance() + if err != nil { + return nil, errors.Wrapf(err, "failed to get instance of service %q", s) + } + if i == nil { + return nil, errors.Errorf("instance of service %q not found", s) + } + opts = append(opts, fn(i)) + } + return opts, nil +} + +type testFunc func(context.Context, *testing.T, *ImageService) + +func setupTest(ctx context.Context, root 
string, service containerd.ClientOpt, fn testFunc) func(*testing.T) { + return func(t *testing.T) { + name := t.Name() + name = name[strings.IndexByte(name, '/')+1:] + root = filepath.Join(root, name) + platform := platforms.DefaultSpec() + + ctx = namespaces.WithNamespace(ctx, name) + + client, err := containerd.New( + "", + containerd.WithDefaultNamespace(name), + service, + ) + if err != nil { + t.Fatalf("Failed to get containerd client: %v", err) + } + + // TODO(containerd): Use a mocked layer store or one backed by containerd? + idMapping, err := getIDMapping() + if err != nil { + t.Fatal(err) + } + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ + Root: root, + MetadataStorePathTemplate: filepath.Join(root, "layerdb"), + GraphDriver: testgraphdriver, + IDMapping: idMapping, + OS: platform.OS, + }) + if err != nil { + t.Fatalf("Failed to initialize layer store: %v", err) + } + + config := ImageServiceConfig{ + DefaultNamespace: name, + DefaultPlatform: platforms.DefaultSpec(), + Client: client, + LayerBackends: []LayerBackend{ + { + Store: ls, + Platform: platforms.Only(platform), + }, + }, + //ContainerStore containerStore + //EventsService *daemonevents.Events + //MaxConcurrentDownloads: 3, + //MaxConcurrentUploads: 3, + } + + fn(ctx, t, NewImageService(config)) + } +} + +func TestImageService(t *testing.T) { + ctx := context.Background() + td, err := ioutil.TempDir("", "imagetest-") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := os.RemoveAll(td); err != nil { + t.Errorf("Failed to remove temp dir %s: %s", td, err) + } + }() + service, err := containerdServiceOpt(ctx, td) + if err != nil { + t.Error(err) + return + } + + t.Run("ListImages", setupTest(ctx, td, service, testListImages)) + +} diff --git a/daemon/images/service_unix_test.go b/daemon/images/service_unix_test.go new file mode 100644 index 0000000000000..e70cd2c070f77 --- /dev/null +++ b/daemon/images/service_unix_test.go @@ -0,0 +1,45 @@ +// +build darwin freebsd 
solaris linux + +package images + +import ( + "os/user" + "strconv" + + _ "github.com/containerd/containerd/snapshots/native" + _ "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/idtools" +) + +var testgraphdriver = "vfs" + +func getIDMapping() (*idtools.IdentityMapping, error) { + u, err := user.Current() + if err != nil { + return nil, err + } + uid, err := strconv.Atoi(u.Uid) + if err != nil { + return nil, err + } + if uid == 0 { + return &idtools.IdentityMapping{}, nil + } + gid, err := strconv.Atoi(u.Gid) + if err != nil { + return nil, err + } + + uidM := idtools.IDMap{ + ContainerID: 0, + HostID: uid, + Size: 1, + } + gidM := idtools.IDMap{ + ContainerID: 0, + HostID: gid, + Size: 1, + } + + return idtools.NewIDMappingsFromMaps([]idtools.IDMap{uidM}, []idtools.IDMap{gidM}), nil +} diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply.go b/vendor/github.com/containerd/containerd/diff/apply/apply.go new file mode 100644 index 0000000000000..d5b4ff45d39ab --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/apply/apply.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package apply + +import ( + "context" + "io" + "io/ioutil" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NewFileSystemApplier returns an applier which simply mounts +// and applies diff onto the mounted filesystem. +func NewFileSystemApplier(cs content.Provider) diff.Applier { + return &fsApplier{ + store: cs, + } +} + +type fsApplier struct { + store content.Provider +} + +var emptyDesc = ocispec.Descriptor{} + +// Apply applies the content associated with the provided digests onto the +// provided mounts. Archive content will be extracted and decompressed if +// necessary. 
+func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (d ocispec.Descriptor, err error) { + t1 := time.Now() + defer func() { + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "d": time.Since(t1), + "dgst": desc.Digest, + "size": desc.Size, + "media": desc.MediaType, + }).Debugf("diff applied") + } + }() + + isCompressed, err := images.IsCompressedDiff(ctx, desc.MediaType) + if err != nil { + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.store.ReaderAt(ctx, desc) + if err != nil { + return errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + r := content.NewReader(ra) + if isCompressed { + ds, err := compression.DecompressStream(r) + if err != nil { + return err + } + defer ds.Close() + r = ds + } + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(r, digester.Hash()), + } + + if _, err := archive.Apply(ctx, root, rc); err != nil { + return err + } + + // Read any trailing data + if _, err := io.Copy(ioutil.Discard, rc); err != nil { + return err + } + + ocidesc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + return nil + + }); err != nil { + return emptyDesc, err + } + return ocidesc, nil +} + +type readCounter struct { + r io.Reader + c int64 +} + +func (rc *readCounter) Read(p []byte) (n int, err error) { + n, err = rc.r.Read(p) + rc.c += int64(n) + return +} diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go new file mode 100644 index 0000000000000..a45a5630b5347 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -0,0 +1,181 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package walking + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "math/rand" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type walkingDiff struct { + store content.Store +} + +var emptyDesc = ocispec.Descriptor{} +var uncompressed = "containerd.io/uncompressed" + +// NewWalkingDiff is a generic implementation of diff.Comparer. The diff is +// calculated by mounting both the upper and lower mount sets and walking the +// mounted directories concurrently. Changes are calculated by comparing files +// against each other or by comparing file existence between directories. +// NewWalkingDiff uses no special characteristics of the mount sets and is +// expected to work with any filesystem. +func NewWalkingDiff(store content.Store) diff.Comparer { + return &walkingDiff{ + store: store, + } +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. 
+func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return emptyDesc, err + } + } + + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + var isCompressed bool + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + default: + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { + return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + var newReference bool + if config.Reference == "" { + newReference = true + config.Reference = uniqueRef() + } + + cw, err := s.store.Writer(ctx, + content.WithRef(config.Reference), + content.WithDescriptor(ocispec.Descriptor{ + MediaType: config.MediaType, // most contentstore implementations just ignore this + })) + if err != nil { + return errors.Wrap(err, "failed to open writer") + } + defer func() { + if err != nil { + cw.Close() + if newReference { + if err := s.store.Abort(ctx, config.Reference); err != nil { + log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload") + } + } + } + }() + if !newReference { + if err := cw.Truncate(0); err != nil { + return err + } + } + + if isCompressed { + dgstr := digest.SHA256.Digester() + compressed, err := compression.CompressStream(cw, compression.Gzip) + if err != nil { + return errors.Wrap(err, "failed to get compressed stream") + } + err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) + compressed.Close() + if err != nil { + return errors.Wrap(err, "failed to write compressed diff") + } + + if config.Labels == nil { + config.Labels = map[string]string{} 
+ } + config.Labels[uncompressed] = dgstr.Digest().String() + } else { + if err = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); err != nil { + return errors.Wrap(err, "failed to write diff") + } + } + + var commitopts []content.Opt + if config.Labels != nil { + commitopts = append(commitopts, content.WithLabels(config.Labels)) + } + + dgst := cw.Digest() + if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to commit") + } + } + + info, err := s.store.Info(ctx, dgst) + if err != nil { + return errors.Wrap(err, "failed to get info from content store") + } + + // Set uncompressed label if digest already existed without label + if _, ok := info.Labels[uncompressed]; !ok { + info.Labels[uncompressed] = config.Labels[uncompressed] + if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { + return errors.Wrap(err, "error setting uncompressed label") + } + } + + ocidesc = ocispec.Descriptor{ + MediaType: config.MediaType, + Size: info.Size, + Digest: info.Digest, + } + return nil + }) + }); err != nil { + return emptyDesc, err + } + + return ocidesc, nil +} + +func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go b/vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go new file mode 100644 index 0000000000000..a2dcc1094b640 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/diff/apply" + "github.com/containerd/containerd/diff/walking" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.DiffPlugin, + ID: "walking", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + md, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + cs := md.(*metadata.DB).ContentStore() + + return diffPlugin{ + Comparer: walking.NewWalkingDiff(cs), + Applier: apply.NewFileSystemApplier(cs), + }, nil + }, + }) +} + +type diffPlugin struct { + diff.Comparer + diff.Applier +} diff --git a/vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go b/vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go new file mode 100644 index 0000000000000..f5890fb74da8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go @@ -0,0 +1,356 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package scheduler + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/plugin" + "github.com/pkg/errors" +) + +// config configures the garbage collection policies. +type config struct { + // PauseThreshold represents the maximum amount of time garbage + // collection should be scheduled based on the average pause time. + // For example, a value of 0.02 means that scheduled garbage collection + // pauses should present at most 2% of real time, + // or 20ms of every second. + // + // A maximum value of .5 is enforced to prevent over scheduling of the + // garbage collector, trigger options are available to run in a more + // predictable time frame after mutation. + // + // Default is 0.02 + PauseThreshold float64 `toml:"pause_threshold"` + + // DeletionThreshold is used to guarantee that a garbage collection is + // scheduled after configured number of deletions have occurred + // since the previous garbage collection. A value of 0 indicates that + // garbage collection will not be triggered by deletion count. + // + // Default 0 + DeletionThreshold int `toml:"deletion_threshold"` + + // MutationThreshold is used to guarantee that a garbage collection is + // run after a configured number of database mutations have occurred + // since the previous garbage collection. A value of 0 indicates that + // garbage collection will only be run after a manual trigger or + // deletion. 
Unlike the deletion threshold, the mutation threshold does + // not cause scheduling of a garbage collection, but ensures GC is run + // at the next scheduled GC. + // + // Default 100 + MutationThreshold int `toml:"mutation_threshold"` + + // ScheduleDelay is the duration in the future to schedule a garbage + // collection triggered manually or by exceeding the configured + // threshold for deletion or mutation. A zero value will immediately + // schedule. Use suffix "ms" for millisecond and "s" for second. + // + // Default is "0ms" + ScheduleDelay duration `toml:"schedule_delay"` + + // StartupDelay is the delay duration to do an initial garbage + // collection after startup. The initial garbage collection is used to + // set the base for pause threshold and should be scheduled in the + // future to avoid slowing down other startup processes. Use suffix + // "ms" for millisecond and "s" for second. + // + // Default is "100ms" + StartupDelay duration `toml:"startup_delay"` +} + +type duration time.Duration + +func (d *duration) UnmarshalText(text []byte) error { + ed, err := time.ParseDuration(string(text)) + if err != nil { + return err + } + *d = duration(ed) + return nil +} + +func (d duration) MarshalText() (text []byte, err error) { + return []byte(time.Duration(d).String()), nil +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GCPlugin, + ID: "scheduler", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + Config: &config{ + PauseThreshold: 0.02, + DeletionThreshold: 0, + MutationThreshold: 100, + ScheduleDelay: duration(0), + StartupDelay: duration(100 * time.Millisecond), + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + md, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + mdCollector, ok := md.(collector) + if !ok { + return nil, errors.Errorf("%s %T must implement collector", plugin.MetadataPlugin, md) + } + + m := newScheduler(mdCollector, ic.Config.(*config)) + + 
ic.Meta.Exports = map[string]string{ + "PauseThreshold": fmt.Sprint(m.pauseThreshold), + "DeletionThreshold": fmt.Sprint(m.deletionThreshold), + "MutationThreshold": fmt.Sprint(m.mutationThreshold), + "ScheduleDelay": fmt.Sprint(m.scheduleDelay), + } + + go m.run(ic.Context) + + return m, nil + }, + }) +} + +type mutationEvent struct { + ts time.Time + mutation bool + dirty bool +} + +type collector interface { + RegisterMutationCallback(func(bool)) + GarbageCollect(context.Context) (gc.Stats, error) +} + +type gcScheduler struct { + c collector + + eventC chan mutationEvent + + waiterL sync.Mutex + waiters []chan gc.Stats + + pauseThreshold float64 + deletionThreshold int + mutationThreshold int + scheduleDelay time.Duration + startupDelay time.Duration +} + +func newScheduler(c collector, cfg *config) *gcScheduler { + eventC := make(chan mutationEvent) + + s := &gcScheduler{ + c: c, + eventC: eventC, + pauseThreshold: cfg.PauseThreshold, + deletionThreshold: cfg.DeletionThreshold, + mutationThreshold: cfg.MutationThreshold, + scheduleDelay: time.Duration(cfg.ScheduleDelay), + startupDelay: time.Duration(cfg.StartupDelay), + } + + if s.pauseThreshold < 0.0 { + s.pauseThreshold = 0.0 + } + if s.pauseThreshold > 0.5 { + s.pauseThreshold = 0.5 + } + if s.mutationThreshold < 0 { + s.mutationThreshold = 0 + } + if s.scheduleDelay < 0 { + s.scheduleDelay = 0 + } + if s.startupDelay < 0 { + s.startupDelay = 0 + } + + c.RegisterMutationCallback(s.mutationCallback) + + return s +} + +func (s *gcScheduler) ScheduleAndWait(ctx context.Context) (gc.Stats, error) { + return s.wait(ctx, true) +} + +func (s *gcScheduler) wait(ctx context.Context, trigger bool) (gc.Stats, error) { + wc := make(chan gc.Stats, 1) + s.waiterL.Lock() + s.waiters = append(s.waiters, wc) + s.waiterL.Unlock() + + if trigger { + e := mutationEvent{ + ts: time.Now(), + } + go func() { + s.eventC <- e + }() + } + + var gcStats gc.Stats + select { + case stats, ok := <-wc: + if !ok { + return gcStats, 
errors.New("gc failed") + } + gcStats = stats + case <-ctx.Done(): + return gcStats, ctx.Err() + } + + return gcStats, nil +} + +func (s *gcScheduler) mutationCallback(dirty bool) { + e := mutationEvent{ + ts: time.Now(), + mutation: true, + dirty: dirty, + } + go func() { + s.eventC <- e + }() +} + +func schedule(d time.Duration) (<-chan time.Time, *time.Time) { + next := time.Now().Add(d) + return time.After(d), &next +} + +func (s *gcScheduler) run(ctx context.Context) { + var ( + schedC <-chan time.Time + + lastCollection *time.Time + nextCollection *time.Time + + interval = time.Second + gcTime time.Duration + collections int + // TODO(dmcg): expose collection stats as metrics + + triggered bool + deletions int + mutations int + ) + if s.startupDelay > 0 { + schedC, nextCollection = schedule(s.startupDelay) + } + for { + select { + case <-schedC: + // Check if garbage collection can be skipped because + // it is not needed or was not requested and reschedule + // it to attempt again after another time interval. + if !triggered && lastCollection != nil && deletions == 0 && + (s.mutationThreshold == 0 || mutations < s.mutationThreshold) { + schedC, nextCollection = schedule(interval) + continue + } + case e := <-s.eventC: + if lastCollection != nil && lastCollection.After(e.ts) { + continue + } + if e.dirty { + deletions++ + } + if e.mutation { + mutations++ + } else { + triggered = true + } + + // Check if condition should cause immediate collection. 
+ if triggered || + (s.deletionThreshold > 0 && deletions >= s.deletionThreshold) || + (nextCollection == nil && ((s.deletionThreshold == 0 && deletions > 0) || + (s.mutationThreshold > 0 && mutations >= s.mutationThreshold))) { + // Check if not already scheduled before delay threshold + if nextCollection == nil || nextCollection.After(time.Now().Add(s.scheduleDelay)) { + // TODO(dmcg): track re-schedules for tuning schedule config + schedC, nextCollection = schedule(s.scheduleDelay) + } + } + + continue + case <-ctx.Done(): + return + } + + s.waiterL.Lock() + + stats, err := s.c.GarbageCollect(ctx) + last := time.Now() + if err != nil { + log.G(ctx).WithError(err).Error("garbage collection failed") + + // Reschedule garbage collection for same duration + 1 second + schedC, nextCollection = schedule(nextCollection.Sub(*lastCollection) + time.Second) + + // Update last collection time even though failure occurred + lastCollection = &last + + for _, w := range s.waiters { + close(w) + } + s.waiters = nil + s.waiterL.Unlock() + continue + } + + log.G(ctx).WithField("d", stats.Elapsed()).Debug("garbage collected") + + gcTime += stats.Elapsed() + collections++ + triggered = false + deletions = 0 + mutations = 0 + + // Calculate new interval with updated times + if s.pauseThreshold > 0.0 { + // Set interval to average gc time divided by the pause threshold + // This algorithm ensures that a gc is scheduled to allow enough + // runtime in between gc to reach the pause threshold. 
+ // Pause threshold is always 0.0 < threshold <= 0.5 + avg := float64(gcTime) / float64(collections) + interval = time.Duration(avg/s.pauseThreshold - avg) + } + + lastCollection = &last + schedC, nextCollection = schedule(interval) + + for _, w := range s.waiters { + w <- stats + } + s.waiters = nil + s.waiterL.Unlock() + } +} diff --git a/vendor/github.com/containerd/containerd/services/containers/helpers.go b/vendor/github.com/containerd/containerd/services/containers/helpers.go new file mode 100644 index 0000000000000..dde4caed19b68 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/containers/helpers.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package containers + +import ( + api "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/containers" +) + +func containersToProto(containers []containers.Container) []api.Container { + var containerspb []api.Container + + for _, image := range containers { + containerspb = append(containerspb, containerToProto(&image)) + } + + return containerspb +} + +func containerToProto(container *containers.Container) api.Container { + return api.Container{ + ID: container.ID, + Labels: container.Labels, + Image: container.Image, + Runtime: &api.Container_Runtime{ + Name: container.Runtime.Name, + Options: container.Runtime.Options, + }, + Spec: container.Spec, + Snapshotter: container.Snapshotter, + SnapshotKey: container.SnapshotKey, + CreatedAt: container.CreatedAt, + UpdatedAt: container.UpdatedAt, + Extensions: container.Extensions, + } +} + +func containerFromProto(containerpb *api.Container) containers.Container { + var runtime containers.RuntimeInfo + if containerpb.Runtime != nil { + runtime = containers.RuntimeInfo{ + Name: containerpb.Runtime.Name, + Options: containerpb.Runtime.Options, + } + } + return containers.Container{ + ID: containerpb.ID, + Labels: containerpb.Labels, + Image: containerpb.Image, + Runtime: runtime, + Spec: containerpb.Spec, + Snapshotter: containerpb.Snapshotter, + SnapshotKey: containerpb.SnapshotKey, + Extensions: containerpb.Extensions, + } +} diff --git a/vendor/github.com/containerd/containerd/services/containers/local.go b/vendor/github.com/containerd/containerd/services/containers/local.go new file mode 100644 index 0000000000000..7b1a24b8f2140 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/containers/local.go @@ -0,0 +1,243 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containers + +import ( + "context" + "io" + + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcm "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ContainersService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return &local{ + db: m.(*metadata.DB), + publisher: ic.Events, + }, nil + }, + }) +} + +type local struct { + db *metadata.DB + publisher events.Publisher +} + +var _ api.ContainersClient = &local{} + +func (l *local) Get(ctx context.Context, req *api.GetContainerRequest, _ ...grpc.CallOption) (*api.GetContainerResponse, error) { + var resp api.GetContainerResponse + + return &resp, errdefs.ToGRPC(l.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + container, err := store.Get(ctx, req.ID) + if err != nil { + return err + } + containerpb := 
containerToProto(&container) + resp.Container = containerpb + + return nil + })) +} + +func (l *local) List(ctx context.Context, req *api.ListContainersRequest, _ ...grpc.CallOption) (*api.ListContainersResponse, error) { + var resp api.ListContainersResponse + return &resp, errdefs.ToGRPC(l.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + containers, err := store.List(ctx, req.Filters...) + if err != nil { + return err + } + resp.Containers = containersToProto(containers) + return nil + })) +} + +func (l *local) ListStream(ctx context.Context, req *api.ListContainersRequest, _ ...grpc.CallOption) (api.Containers_ListStreamClient, error) { + stream := &localStream{ + ctx: ctx, + } + return stream, errdefs.ToGRPC(l.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + containers, err := store.List(ctx, req.Filters...) + if err != nil { + return err + } + stream.containers = containersToProto(containers) + return nil + })) +} + +func (l *local) Create(ctx context.Context, req *api.CreateContainerRequest, _ ...grpc.CallOption) (*api.CreateContainerResponse, error) { + var resp api.CreateContainerResponse + + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + container := containerFromProto(&req.Container) + + created, err := store.Create(ctx, container) + if err != nil { + return err + } + + resp.Container = containerToProto(&created) + + return nil + }); err != nil { + return &resp, errdefs.ToGRPC(err) + } + if err := l.publisher.Publish(ctx, "/containers/create", &eventstypes.ContainerCreate{ + ID: resp.Container.ID, + Image: resp.Container.Image, + Runtime: &eventstypes.ContainerCreate_Runtime{ + Name: resp.Container.Runtime.Name, + Options: resp.Container.Runtime.Options, + }, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (l *local) Update(ctx context.Context, req *api.UpdateContainerRequest, _ ...grpc.CallOption) 
(*api.UpdateContainerResponse, error) { + if req.Container.ID == "" { + return nil, status.Errorf(codes.InvalidArgument, "Container.ID required") + } + var ( + resp api.UpdateContainerResponse + container = containerFromProto(&req.Container) + ) + + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + var fieldpaths []string + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + fieldpaths = append(fieldpaths, req.UpdateMask.Paths...) + } + + updated, err := store.Update(ctx, container, fieldpaths...) + if err != nil { + return err + } + + resp.Container = containerToProto(&updated) + return nil + }); err != nil { + return &resp, errdefs.ToGRPC(err) + } + + if err := l.publisher.Publish(ctx, "/containers/update", &eventstypes.ContainerUpdate{ + ID: resp.Container.ID, + Image: resp.Container.Image, + Labels: resp.Container.Labels, + SnapshotKey: resp.Container.SnapshotKey, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (l *local) Delete(ctx context.Context, req *api.DeleteContainerRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + return store.Delete(ctx, req.ID) + }); err != nil { + return &ptypes.Empty{}, errdefs.ToGRPC(err) + } + + if err := l.publisher.Publish(ctx, "/containers/delete", &eventstypes.ContainerDelete{ + ID: req.ID, + }); err != nil { + return &ptypes.Empty{}, err + } + + return &ptypes.Empty{}, nil +} + +func (l *local) withStore(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewContainerStore(tx)) } +} + +func (l *local) withStoreView(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { + return l.db.View(l.withStore(ctx, fn)) +} + +func (l *local) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store 
containers.Store) error) error { + return l.db.Update(l.withStore(ctx, fn)) +} + +type localStream struct { + ctx context.Context + containers []api.Container + i int +} + +func (s *localStream) Recv() (*api.ListContainerMessage, error) { + if s.i >= len(s.containers) { + return nil, io.EOF + } + c := s.containers[s.i] + s.i++ + return &api.ListContainerMessage{ + Container: &c, + }, nil +} + +func (s *localStream) Context() context.Context { + return s.ctx +} + +func (s *localStream) CloseSend() error { + return nil +} + +func (s *localStream) Header() (grpcm.MD, error) { + return nil, nil +} + +func (s *localStream) Trailer() grpcm.MD { + return nil +} + +func (s *localStream) SendMsg(m interface{}) error { + return nil +} + +func (s *localStream) RecvMsg(m interface{}) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/containers/service.go b/vendor/github.com/containerd/containerd/services/containers/service.go new file mode 100644 index 0000000000000..77e844908f0f0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/containers/service.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package containers + +import ( + "context" + "io" + + api "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "containers", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.ContainersService] + if !ok { + return nil, errors.New("containers service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(api.ContainersClient)}, nil + }, + }) +} + +type service struct { + local api.ContainersClient +} + +var _ api.ContainersServer = &service{} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterContainersServer(server, s) + return nil +} + +func (s *service) Get(ctx context.Context, req *api.GetContainerRequest) (*api.GetContainerResponse, error) { + return s.local.Get(ctx, req) +} + +func (s *service) List(ctx context.Context, req *api.ListContainersRequest) (*api.ListContainersResponse, error) { + return s.local.List(ctx, req) +} + +func (s *service) ListStream(req *api.ListContainersRequest, stream api.Containers_ListStreamServer) error { + containers, err := s.local.ListStream(stream.Context(), req) + if err != nil { + return err + } + for { + select { + case <-stream.Context().Done(): + return nil + default: + c, err := containers.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if err := stream.Send(c); err != nil { + return err + } + } + } +} + +func (s *service) Create(ctx context.Context, req *api.CreateContainerRequest) (*api.CreateContainerResponse, error) { + return 
s.local.Create(ctx, req) +} + +func (s *service) Update(ctx context.Context, req *api.UpdateContainerRequest) (*api.UpdateContainerResponse, error) { + return s.local.Update(ctx, req) +} + +func (s *service) Delete(ctx context.Context, req *api.DeleteContainerRequest) (*ptypes.Empty, error) { + return s.local.Delete(ctx, req) +} diff --git a/vendor/github.com/containerd/containerd/services/diff/local.go b/vendor/github.com/containerd/containerd/services/diff/local.go new file mode 100644 index 0000000000000..0cb6222c5a396 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/diff/local.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "context" + + diffapi "github.com/containerd/containerd/api/services/diff/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +type config struct { + // Order is the order of preference in which to try diff algorithms, the + // first differ which is supported is used. + // Note when multiple differs may be supported, this order will be + // respected for which is chosen. 
Each differ should return the same + // correct output, allowing any ordering to be used to prefer + // more optimimal implementations. + Order []string `toml:"default"` +} + +type differ interface { + diff.Comparer + diff.Applier +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.DiffService, + Requires: []plugin.Type{ + plugin.DiffPlugin, + }, + Config: defaultDifferConfig, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + differs, err := ic.GetByType(plugin.DiffPlugin) + if err != nil { + return nil, err + } + + orderedNames := ic.Config.(*config).Order + ordered := make([]differ, len(orderedNames)) + for i, n := range orderedNames { + differp, ok := differs[n] + if !ok { + return nil, errors.Errorf("needed differ not loaded: %s", n) + } + d, err := differp.Instance() + if err != nil { + return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n) + } + + ordered[i], ok = d.(differ) + if !ok { + return nil, errors.Errorf("differ does not implement Comparer and Applier interface: %s", n) + } + } + + return &local{ + differs: ordered, + }, nil + }, + }) +} + +type local struct { + differs []differ +} + +var _ diffapi.DiffClient = &local{} + +func (l *local) Apply(ctx context.Context, er *diffapi.ApplyRequest, _ ...grpc.CallOption) (*diffapi.ApplyResponse, error) { + var ( + ocidesc ocispec.Descriptor + err error + desc = toDescriptor(er.Diff) + mounts = toMounts(er.Mounts) + ) + + for _, differ := range l.differs { + ocidesc, err = differ.Apply(ctx, desc, mounts) + if !errdefs.IsNotImplemented(err) { + break + } + } + + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &diffapi.ApplyResponse{ + Applied: fromDescriptor(ocidesc), + }, nil + +} + +func (l *local) Diff(ctx context.Context, dr *diffapi.DiffRequest, _ ...grpc.CallOption) (*diffapi.DiffResponse, error) { + var ( + ocidesc ocispec.Descriptor + err error + aMounts = toMounts(dr.Left) + bMounts 
= toMounts(dr.Right) + ) + + var opts []diff.Opt + if dr.MediaType != "" { + opts = append(opts, diff.WithMediaType(dr.MediaType)) + } + if dr.Ref != "" { + opts = append(opts, diff.WithReference(dr.Ref)) + } + if dr.Labels != nil { + opts = append(opts, diff.WithLabels(dr.Labels)) + } + + for _, d := range l.differs { + ocidesc, err = d.Compare(ctx, aMounts, bMounts, opts...) + if !errdefs.IsNotImplemented(err) { + break + } + } + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &diffapi.DiffResponse{ + Diff: fromDescriptor(ocidesc), + }, nil +} + +func toMounts(apim []*types.Mount) []mount.Mount { + mounts := make([]mount.Mount, len(apim)) + for i, m := range apim { + mounts[i] = mount.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + } + } + return mounts +} + +func toDescriptor(d *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest, + Size: d.Size_, + } +} + +func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { + return &types.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest, + Size_: d.Size, + } +} diff --git a/vendor/github.com/containerd/containerd/services/diff/service.go b/vendor/github.com/containerd/containerd/services/diff/service.go new file mode 100644 index 0000000000000..369e8f84dfa06 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/diff/service.go @@ -0,0 +1,71 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "context" + + diffapi "github.com/containerd/containerd/api/services/diff/v1" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "diff", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.DiffService] + if !ok { + return nil, errors.New("diff service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(diffapi.DiffClient)}, nil + }, + }) +} + +type service struct { + local diffapi.DiffClient +} + +var _ diffapi.DiffServer = &service{} + +func (s *service) Register(gs *grpc.Server) error { + diffapi.RegisterDiffServer(gs, s) + return nil +} + +func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) { + return s.local.Apply(ctx, er) +} + +func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) { + return s.local.Diff(ctx, dr) +} diff --git a/vendor/github.com/containerd/containerd/services/diff/service_unix.go b/vendor/github.com/containerd/containerd/services/diff/service_unix.go new file mode 100644 index 0000000000000..04a85f7c4a749 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/diff/service_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +var defaultDifferConfig = &config{ + Order: []string{"walking"}, +} diff --git a/vendor/github.com/containerd/containerd/services/diff/service_windows.go b/vendor/github.com/containerd/containerd/services/diff/service_windows.go new file mode 100644 index 0000000000000..00584ecb5eea6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/diff/service_windows.go @@ -0,0 +1,23 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +var defaultDifferConfig = &config{ + Order: []string{"windows", "windows-lcow"}, +} diff --git a/vendor/github.com/containerd/containerd/services/images/helpers.go b/vendor/github.com/containerd/containerd/services/images/helpers.go new file mode 100644 index 0000000000000..8ad0d117e4f80 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/images/helpers.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/images" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func imagesToProto(images []images.Image) []imagesapi.Image { + var imagespb []imagesapi.Image + + for _, image := range images { + imagespb = append(imagespb, imageToProto(&image)) + } + + return imagespb +} + +func imageToProto(image *images.Image) imagesapi.Image { + return imagesapi.Image{ + Name: image.Name, + Labels: image.Labels, + Target: descToProto(&image.Target), + CreatedAt: image.CreatedAt, + UpdatedAt: image.UpdatedAt, + } +} + +func imageFromProto(imagepb *imagesapi.Image) images.Image { + return images.Image{ + Name: imagepb.Name, + Labels: imagepb.Labels, + Target: descFromProto(&imagepb.Target), + CreatedAt: imagepb.CreatedAt, + UpdatedAt: imagepb.UpdatedAt, + } +} + +func descFromProto(desc *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: desc.MediaType, + Size: desc.Size_, + Digest: desc.Digest, + } +} + +func descToProto(desc *ocispec.Descriptor) types.Descriptor { + return types.Descriptor{ + MediaType: desc.MediaType, + Size_: desc.Size, + Digest: desc.Digest, + } +} diff --git a/vendor/github.com/containerd/containerd/services/images/local.go b/vendor/github.com/containerd/containerd/services/images/local.go new file 
mode 100644 index 0000000000000..ddd815a191f9c --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/images/local.go @@ -0,0 +1,180 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ImagesService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + plugin.GCPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + g, err := ic.Get(plugin.GCPlugin) + if err != nil { + return nil, err + } + + return &local{ + store: metadata.NewImageStore(m.(*metadata.DB)), + publisher: ic.Events, + gc: g.(gcScheduler), + }, nil + }, + }) +} + +type gcScheduler interface { + 
ScheduleAndWait(context.Context) (gc.Stats, error) +} + +type local struct { + store images.Store + gc gcScheduler + publisher events.Publisher +} + +var _ imagesapi.ImagesClient = &local{} + +func (l *local) Get(ctx context.Context, req *imagesapi.GetImageRequest, _ ...grpc.CallOption) (*imagesapi.GetImageResponse, error) { + image, err := l.store.Get(ctx, req.Name) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + imagepb := imageToProto(&image) + return &imagesapi.GetImageResponse{ + Image: &imagepb, + }, nil +} + +func (l *local) List(ctx context.Context, req *imagesapi.ListImagesRequest, _ ...grpc.CallOption) (*imagesapi.ListImagesResponse, error) { + images, err := l.store.List(ctx, req.Filters...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &imagesapi.ListImagesResponse{ + Images: imagesToProto(images), + }, nil +} + +func (l *local) Create(ctx context.Context, req *imagesapi.CreateImageRequest, _ ...grpc.CallOption) (*imagesapi.CreateImageResponse, error) { + log.G(ctx).WithField("name", req.Image.Name).WithField("target", req.Image.Target.Digest).Debugf("create image") + if req.Image.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") + } + + var ( + image = imageFromProto(&req.Image) + resp imagesapi.CreateImageResponse + ) + created, err := l.store.Create(ctx, image) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + resp.Image = imageToProto(&created) + + if err := l.publisher.Publish(ctx, "/images/create", &eventstypes.ImageCreate{ + Name: resp.Image.Name, + Labels: resp.Image.Labels, + }); err != nil { + return nil, err + } + + return &resp, nil + +} + +func (l *local) Update(ctx context.Context, req *imagesapi.UpdateImageRequest, _ ...grpc.CallOption) (*imagesapi.UpdateImageResponse, error) { + if req.Image.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") + } + + var ( + image = imageFromProto(&req.Image) + resp 
imagesapi.UpdateImageResponse + fieldpaths []string + ) + + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + fieldpaths = append(fieldpaths, req.UpdateMask.Paths...) + } + + updated, err := l.store.Update(ctx, image, fieldpaths...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + resp.Image = imageToProto(&updated) + + if err := l.publisher.Publish(ctx, "/images/update", &eventstypes.ImageUpdate{ + Name: resp.Image.Name, + Labels: resp.Image.Labels, + }); err != nil { + return nil, err + } + + return &resp, nil +} + +func (l *local) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + log.G(ctx).WithField("name", req.Name).Debugf("delete image") + + if err := l.store.Delete(ctx, req.Name); err != nil { + return nil, errdefs.ToGRPC(err) + } + + if err := l.publisher.Publish(ctx, "/images/delete", &eventstypes.ImageDelete{ + Name: req.Name, + }); err != nil { + return nil, err + } + + if req.Sync { + if _, err := l.gc.ScheduleAndWait(ctx); err != nil { + return nil, err + } + } + + return &ptypes.Empty{}, nil +} diff --git a/vendor/github.com/containerd/containerd/services/images/service.go b/vendor/github.com/containerd/containerd/services/images/service.go new file mode 100644 index 0000000000000..83d802140d54d --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/images/service.go @@ -0,0 +1,84 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package images + +import ( + "context" + + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "images", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.ImagesService] + if !ok { + return nil, errors.New("images service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(imagesapi.ImagesClient)}, nil + }, + }) +} + +type service struct { + local imagesapi.ImagesClient +} + +var _ imagesapi.ImagesServer = &service{} + +func (s *service) Register(server *grpc.Server) error { + imagesapi.RegisterImagesServer(server, s) + return nil +} + +func (s *service) Get(ctx context.Context, req *imagesapi.GetImageRequest) (*imagesapi.GetImageResponse, error) { + return s.local.Get(ctx, req) +} + +func (s *service) List(ctx context.Context, req *imagesapi.ListImagesRequest) (*imagesapi.ListImagesResponse, error) { + return s.local.List(ctx, req) +} + +func (s *service) Create(ctx context.Context, req *imagesapi.CreateImageRequest) (*imagesapi.CreateImageResponse, error) { + return s.local.Create(ctx, req) +} + +func (s *service) Update(ctx context.Context, req *imagesapi.UpdateImageRequest) (*imagesapi.UpdateImageResponse, error) { + return s.local.Update(ctx, req) +} + +func (s *service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest) (*ptypes.Empty, error) { + return s.local.Delete(ctx, req) +} diff --git a/vendor/github.com/containerd/containerd/services/leases/local.go 
b/vendor/github.com/containerd/containerd/services/leases/local.go new file mode 100644 index 0000000000000..0cb3108379593 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/leases/local.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package leases + +import ( + "context" + + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + bolt "go.etcd.io/bbolt" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.LeasesService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + g, err := ic.Get(plugin.GCPlugin) + if err != nil { + return nil, err + } + return &local{ + db: m.(*metadata.DB), + gc: g.(gcScheduler), + }, nil + }, + }) +} + +type gcScheduler interface { + ScheduleAndWait(context.Context) (gc.Stats, error) +} + +type local struct { + db *metadata.DB + gc gcScheduler +} + +func (l *local) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { + var lease leases.Lease + if err := l.db.Update(func(tx *bolt.Tx) error { + var err error + lease, err = metadata.NewLeaseManager(tx).Create(ctx, opts...) 
+ return err + }); err != nil { + return leases.Lease{}, err + } + return lease, nil +} + +func (l *local) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { + var do leases.DeleteOptions + for _, opt := range opts { + if err := opt(ctx, &do); err != nil { + return err + } + } + + if err := l.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).Delete(ctx, lease) + }); err != nil { + return err + } + + if do.Synchronous { + if _, err := l.gc.ScheduleAndWait(ctx); err != nil { + return err + } + } + + return nil + +} + +func (l *local) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { + var ll []leases.Lease + if err := l.db.View(func(tx *bolt.Tx) error { + var err error + ll, err = metadata.NewLeaseManager(tx).List(ctx, filters...) + return err + }); err != nil { + return nil, err + } + return ll, nil +} diff --git a/vendor/github.com/containerd/containerd/services/leases/service.go b/vendor/github.com/containerd/containerd/services/leases/service.go new file mode 100644 index 0000000000000..cc918d32db177 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/leases/service.go @@ -0,0 +1,122 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package leases + +import ( + "context" + + "google.golang.org/grpc" + + api "github.com/containerd/containerd/api/services/leases/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "leases", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.LeasesService] + if !ok { + return nil, errors.New("leases service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{lm: i.(leases.Manager)}, nil + }, + }) +} + +type service struct { + lm leases.Manager +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterLeasesServer(server, s) + return nil +} + +func (s *service) Create(ctx context.Context, r *api.CreateRequest) (*api.CreateResponse, error) { + opts := []leases.Opt{ + leases.WithLabels(r.Labels), + } + if r.ID == "" { + opts = append(opts, leases.WithRandomID()) + } else { + opts = append(opts, leases.WithID(r.ID)) + } + + l, err := s.lm.Create(ctx, opts...) 
+ if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &api.CreateResponse{ + Lease: leaseToGRPC(l), + }, nil +} + +func (s *service) Delete(ctx context.Context, r *api.DeleteRequest) (*ptypes.Empty, error) { + var opts []leases.DeleteOpt + if r.Sync { + opts = append(opts, leases.SynchronousDelete) + } + if err := s.lm.Delete(ctx, leases.Lease{ + ID: r.ID, + }, opts...); err != nil { + return nil, errdefs.ToGRPC(err) + } + return &ptypes.Empty{}, nil +} + +func (s *service) List(ctx context.Context, r *api.ListRequest) (*api.ListResponse, error) { + l, err := s.lm.List(ctx, r.Filters...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + apileases := make([]*api.Lease, len(l)) + for i := range l { + apileases[i] = leaseToGRPC(l[i]) + } + + return &api.ListResponse{ + Leases: apileases, + }, nil +} + +func leaseToGRPC(l leases.Lease) *api.Lease { + return &api.Lease{ + ID: l.ID, + Labels: l.Labels, + CreatedAt: l.CreatedAt, + } +} diff --git a/vendor/github.com/containerd/containerd/services/namespaces/local.go b/vendor/github.com/containerd/containerd/services/namespaces/local.go new file mode 100644 index 0000000000000..f50b65355a864 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/namespaces/local.go @@ -0,0 +1,223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package namespaces + +import ( + "context" + "strings" + + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/namespaces/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.NamespacesService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return &local{ + db: m.(*metadata.DB), + publisher: ic.Events, + }, nil + }, + }) +} + +// Provide local namespaces service instead of local namespace store, +// because namespace store interface doesn't provide enough functionality +// for namespaces service. 
+type local struct { + db *metadata.DB + publisher events.Publisher +} + +var _ api.NamespacesClient = &local{} + +func (l *local) Get(ctx context.Context, req *api.GetNamespaceRequest, _ ...grpc.CallOption) (*api.GetNamespaceResponse, error) { + var resp api.GetNamespaceResponse + + return &resp, l.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { + labels, err := store.Labels(ctx, req.Name) + if err != nil { + return errdefs.ToGRPC(err) + } + + resp.Namespace = api.Namespace{ + Name: req.Name, + Labels: labels, + } + + return nil + }) +} + +func (l *local) List(ctx context.Context, req *api.ListNamespacesRequest, _ ...grpc.CallOption) (*api.ListNamespacesResponse, error) { + var resp api.ListNamespacesResponse + + return &resp, l.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { + namespaces, err := store.List(ctx) + if err != nil { + return err + } + + for _, namespace := range namespaces { + labels, err := store.Labels(ctx, namespace) + if err != nil { + // In general, this should be unlikely, since we are holding a + // transaction to service this request. 
+ return errdefs.ToGRPC(err) + } + + resp.Namespaces = append(resp.Namespaces, api.Namespace{ + Name: namespace, + Labels: labels, + }) + } + + return nil + }) +} + +func (l *local) Create(ctx context.Context, req *api.CreateNamespaceRequest, _ ...grpc.CallOption) (*api.CreateNamespaceResponse, error) { + var resp api.CreateNamespaceResponse + + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + if err := store.Create(ctx, req.Namespace.Name, req.Namespace.Labels); err != nil { + return errdefs.ToGRPC(err) + } + + for k, v := range req.Namespace.Labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { + return err + } + } + + resp.Namespace = req.Namespace + return nil + }); err != nil { + return &resp, err + } + + if err := l.publisher.Publish(ctx, "/namespaces/create", &eventstypes.NamespaceCreate{ + Name: req.Namespace.Name, + Labels: req.Namespace.Labels, + }); err != nil { + return &resp, err + } + + return &resp, nil + +} + +func (l *local) Update(ctx context.Context, req *api.UpdateNamespaceRequest, _ ...grpc.CallOption) (*api.UpdateNamespaceResponse, error) { + var resp api.UpdateNamespaceResponse + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + switch { + case strings.HasPrefix(path, "labels."): + key := strings.TrimPrefix(path, "labels.") + if err := store.SetLabel(ctx, req.Namespace.Name, key, req.Namespace.Labels[key]); err != nil { + return err + } + default: + return status.Errorf(codes.InvalidArgument, "cannot update %q field", path) + } + } + } else { + // clear out the existing labels and then set them to the incoming request. 
+ // get current set of labels + labels, err := store.Labels(ctx, req.Namespace.Name) + if err != nil { + return errdefs.ToGRPC(err) + } + + for k := range labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, ""); err != nil { + return err + } + } + + for k, v := range req.Namespace.Labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { + return err + } + + } + } + + return nil + }); err != nil { + return &resp, err + } + + if err := l.publisher.Publish(ctx, "/namespaces/update", &eventstypes.NamespaceUpdate{ + Name: req.Namespace.Name, + Labels: req.Namespace.Labels, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (l *local) Delete(ctx context.Context, req *api.DeleteNamespaceRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + return errdefs.ToGRPC(store.Delete(ctx, req.Name)) + }); err != nil { + return &ptypes.Empty{}, err + } + // set the namespace in the context before publishing the event + ctx = namespaces.WithNamespace(ctx, req.Name) + if err := l.publisher.Publish(ctx, "/namespaces/delete", &eventstypes.NamespaceDelete{ + Name: req.Name, + }); err != nil { + return &ptypes.Empty{}, err + } + + return &ptypes.Empty{}, nil +} + +func (l *local) withStore(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewNamespaceStore(tx)) } +} + +func (l *local) withStoreView(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { + return l.db.View(l.withStore(ctx, fn)) +} + +func (l *local) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { + return l.db.Update(l.withStore(ctx, fn)) +} diff --git a/vendor/github.com/containerd/containerd/services/namespaces/service.go 
b/vendor/github.com/containerd/containerd/services/namespaces/service.go new file mode 100644 index 0000000000000..d3c74a2cb641c --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/namespaces/service.go @@ -0,0 +1,84 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + + api "github.com/containerd/containerd/api/services/namespaces/v1" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "namespaces", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.NamespacesService] + if !ok { + return nil, errors.New("namespaces service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(api.NamespacesClient)}, nil + }, + }) +} + +type service struct { + local api.NamespacesClient +} + +var _ api.NamespacesServer = &service{} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterNamespacesServer(server, s) + return nil +} + +func (s *service) Get(ctx context.Context, req *api.GetNamespaceRequest) 
(*api.GetNamespaceResponse, error) { + return s.local.Get(ctx, req) +} + +func (s *service) List(ctx context.Context, req *api.ListNamespacesRequest) (*api.ListNamespacesResponse, error) { + return s.local.List(ctx, req) +} + +func (s *service) Create(ctx context.Context, req *api.CreateNamespaceRequest) (*api.CreateNamespaceResponse, error) { + return s.local.Create(ctx, req) +} + +func (s *service) Update(ctx context.Context, req *api.UpdateNamespaceRequest) (*api.UpdateNamespaceResponse, error) { + return s.local.Update(ctx, req) +} + +func (s *service) Delete(ctx context.Context, req *api.DeleteNamespaceRequest) (*ptypes.Empty, error) { + return s.local.Delete(ctx, req) +} diff --git a/vendor/github.com/containerd/containerd/services/server/server.go b/vendor/github.com/containerd/containerd/services/server/server.go new file mode 100644 index 0000000000000..6ed429146902f --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/server/server.go @@ -0,0 +1,394 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package server + +import ( + "context" + "expvar" + "io" + "net" + "net/http" + "net/http/pprof" + "os" + "path/filepath" + "strings" + "sync" + "time" + + csapi "github.com/containerd/containerd/api/services/content/v1" + ssapi "github.com/containerd/containerd/api/services/snapshots/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" + csproxy "github.com/containerd/containerd/content/proxy" + "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/pkg/dialer" + "github.com/containerd/containerd/plugin" + srvconfig "github.com/containerd/containerd/services/server/config" + "github.com/containerd/containerd/snapshots" + ssproxy "github.com/containerd/containerd/snapshots/proxy" + metrics "github.com/docker/go-metrics" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc" +) + +// CreateTopLevelDirectories creates the top-level root and state directories. 
+func CreateTopLevelDirectories(config *srvconfig.Config) error { + switch { + case config.Root == "": + return errors.New("root must be specified") + case config.State == "": + return errors.New("state must be specified") + case config.Root == config.State: + return errors.New("root and state must be different paths") + } + + if err := os.MkdirAll(config.Root, 0711); err != nil { + return err + } + if err := os.MkdirAll(config.State, 0711); err != nil { + return err + } + return nil +} + +// New creates and initializes a new containerd server +func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { + if err := apply(ctx, config); err != nil { + return nil, err + } + plugins, err := LoadPlugins(ctx, config) + if err != nil { + return nil, err + } + + serverOpts := []grpc.ServerOption{ + grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), + } + if config.GRPC.MaxRecvMsgSize > 0 { + serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(config.GRPC.MaxRecvMsgSize)) + } + if config.GRPC.MaxSendMsgSize > 0 { + serverOpts = append(serverOpts, grpc.MaxSendMsgSize(config.GRPC.MaxSendMsgSize)) + } + rpc := grpc.NewServer(serverOpts...) 
+ var ( + services []plugin.Service + s = &Server{ + rpc: rpc, + events: exchange.NewExchange(), + config: config, + } + initialized = plugin.NewPluginSet() + ) + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := plugin.NewContext( + ctx, + p, + initialized, + config.Root, + config.State, + ) + initContext.Events = s.events + initContext.Address = config.GRPC.Address + + // load the plugin specific configuration if it is provided + if p.Config != nil { + pluginConfig, err := config.Decode(p.ID, p.Config) + if err != nil { + return nil, err + } + initContext.Config = pluginConfig + } + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return nil, errors.Wrapf(err, "could not add plugin result to plugin set") + } + + instance, err := result.Instance() + if err != nil { + if plugin.IsSkipPlugin(err) { + log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) + } else { + log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) + } + continue + } + // check for grpc services that should be registered with the server + if service, ok := instance.(plugin.Service); ok { + services = append(services, service) + } + s.plugins = append(s.plugins, result) + } + // register services after all plugins have been initialized + for _, service := range services { + if err := service.Register(rpc); err != nil { + return nil, err + } + } + return s, nil +} + +// Server is the containerd main daemon +type Server struct { + rpc *grpc.Server + events *exchange.Exchange + config *srvconfig.Config + plugins []*plugin.Plugin +} + +// ServeGRPC provides the containerd grpc APIs on the provided listener +func (s *Server) ServeGRPC(l net.Listener) error { + if s.config.Metrics.GRPCHistogram { + // enable grpc time histograms to measure rpc latencies + grpc_prometheus.EnableHandlingTimeHistogram() + } + // before we start serving the 
grpc API register the grpc_prometheus metrics + // handler. This needs to be the last service registered so that it can collect + // metrics for every other service + grpc_prometheus.Register(s.rpc) + return trapClosedConnErr(s.rpc.Serve(l)) +} + +// ServeMetrics provides a prometheus endpoint for exposing metrics +func (s *Server) ServeMetrics(l net.Listener) error { + m := http.NewServeMux() + m.Handle("/v1/metrics", metrics.Handler()) + return trapClosedConnErr(http.Serve(l, m)) +} + +// ServeDebug provides a debug endpoint +func (s *Server) ServeDebug(l net.Listener) error { + // don't use the default http server mux to make sure nothing gets registered + // that we don't want to expose via containerd + m := http.NewServeMux() + m.Handle("/debug/vars", expvar.Handler()) + m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) + m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) + m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) + m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) + m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) + return trapClosedConnErr(http.Serve(l, m)) +} + +// Stop the containerd server canceling any open connections +func (s *Server) Stop() { + s.rpc.Stop() + for i := len(s.plugins) - 1; i >= 0; i-- { + p := s.plugins[i] + instance, err := p.Instance() + if err != nil { + log.L.WithError(err).WithField("id", p.Registration.ID). + Errorf("could not get plugin instance") + continue + } + closer, ok := instance.(io.Closer) + if !ok { + continue + } + if err := closer.Close(); err != nil { + log.L.WithError(err).WithField("id", p.Registration.ID). + Errorf("failed to close plugin") + } + } +} + +// LoadPlugins loads all plugins into containerd and generates an ordered graph +// of all plugins. 
+func LoadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Registration, error) { + // load all plugins into containerd + if err := plugin.Load(filepath.Join(config.Root, "plugins")); err != nil { + return nil, err + } + // load additional plugins that don't automatically register themselves + plugin.Register(&plugin.Registration{ + Type: plugin.ContentPlugin, + ID: "content", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Exports["root"] = ic.Root + return local.NewStore(ic.Root) + }, + }) + plugin.Register(&plugin.Registration{ + Type: plugin.MetadataPlugin, + ID: "bolt", + Requires: []plugin.Type{ + plugin.ContentPlugin, + plugin.SnapshotPlugin, + }, + Config: &srvconfig.BoltConfig{ + ContentSharingPolicy: srvconfig.SharingPolicyShared, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + if err := os.MkdirAll(ic.Root, 0711); err != nil { + return nil, err + } + cs, err := ic.Get(plugin.ContentPlugin) + if err != nil { + return nil, err + } + + snapshottersRaw, err := ic.GetByType(plugin.SnapshotPlugin) + if err != nil { + return nil, err + } + + snapshotters := make(map[string]snapshots.Snapshotter) + for name, sn := range snapshottersRaw { + sn, err := sn.Instance() + if err != nil { + if !plugin.IsSkipPlugin(err) { + log.G(ic.Context).WithError(err). 
+ Warnf("could not use snapshotter %v in metadata plugin", name) + } + continue + } + snapshotters[name] = sn.(snapshots.Snapshotter) + } + + shared := true + ic.Meta.Exports["policy"] = srvconfig.SharingPolicyShared + if cfg, ok := ic.Config.(*srvconfig.BoltConfig); ok { + if cfg.ContentSharingPolicy != "" { + if err := cfg.Validate(); err != nil { + return nil, err + } + if cfg.ContentSharingPolicy == srvconfig.SharingPolicyIsolated { + ic.Meta.Exports["policy"] = srvconfig.SharingPolicyIsolated + shared = false + } + + log.L.WithField("policy", cfg.ContentSharingPolicy).Info("metadata content store policy set") + } + } + + path := filepath.Join(ic.Root, "meta.db") + ic.Meta.Exports["path"] = path + + db, err := bolt.Open(path, 0644, nil) + if err != nil { + return nil, err + } + + var dbopts []metadata.DBOpt + if !shared { + dbopts = append(dbopts, metadata.WithPolicyIsolated) + } + mdb := metadata.NewDB(db, cs.(content.Store), snapshotters, dbopts...) + if err := mdb.Init(ic.Context); err != nil { + return nil, err + } + return mdb, nil + }, + }) + + clients := &proxyClients{} + for name, pp := range config.ProxyPlugins { + var ( + t plugin.Type + f func(*grpc.ClientConn) interface{} + + address = pp.Address + ) + + switch pp.Type { + case string(plugin.SnapshotPlugin), "snapshot": + t = plugin.SnapshotPlugin + ssname := name + f = func(conn *grpc.ClientConn) interface{} { + return ssproxy.NewSnapshotter(ssapi.NewSnapshotsClient(conn), ssname) + } + + case string(plugin.ContentPlugin), "content": + t = plugin.ContentPlugin + f = func(conn *grpc.ClientConn) interface{} { + return csproxy.NewContentStore(csapi.NewContentClient(conn)) + } + default: + log.G(ctx).WithField("type", pp.Type).Warn("unknown proxy plugin type") + } + + plugin.Register(&plugin.Registration{ + Type: t, + ID: name, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Exports["address"] = address + conn, err := clients.getClient(address) + if err != nil { + return nil, 
err + } + return f(conn), nil + }, + }) + + } + + // return the ordered graph for plugins + return plugin.Graph(config.DisabledPlugins), nil +} + +type proxyClients struct { + m sync.Mutex + clients map[string]*grpc.ClientConn +} + +func (pc *proxyClients) getClient(address string) (*grpc.ClientConn, error) { + pc.m.Lock() + defer pc.m.Unlock() + if pc.clients == nil { + pc.clients = map[string]*grpc.ClientConn{} + } else if c, ok := pc.clients[address]; ok { + return c, nil + } + + gopts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.WithBackoffMaxDelay(3 * time.Second), + grpc.WithDialer(dialer.Dialer), + + // TODO(stevvooe): We may need to allow configuration of this on the client. + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), + } + + conn, err := grpc.Dial(dialer.DialAddress(address), gopts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q", address) + } + + pc.clients[address] = conn + + return conn, nil +} + +func trapClosedConnErr(err error) error { + if err == nil { + return nil + } + if strings.Contains(err.Error(), "use of closed network connection") { + return nil + } + return err +} diff --git a/vendor/github.com/containerd/containerd/services/server/server_linux.go b/vendor/github.com/containerd/containerd/services/server/server_linux.go new file mode 100644 index 0000000000000..96b28a572c060 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/server/server_linux.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "context" + "os" + + "github.com/containerd/cgroups" + "github.com/containerd/containerd/log" + srvconfig "github.com/containerd/containerd/services/server/config" + "github.com/containerd/containerd/sys" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// apply sets config settings on the server process +func apply(ctx context.Context, config *srvconfig.Config) error { + if config.OOMScore != 0 { + log.G(ctx).Debugf("changing OOM score to %d", config.OOMScore) + if err := sys.SetOOMScore(os.Getpid(), config.OOMScore); err != nil { + log.G(ctx).WithError(err).Errorf("failed to change OOM score to %d", config.OOMScore) + } + } + if config.Cgroup.Path != "" { + cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(config.Cgroup.Path)) + if err != nil { + if err != cgroups.ErrCgroupDeleted { + return err + } + if cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(config.Cgroup.Path), &specs.LinuxResources{}); err != nil { + return err + } + } + if err := cg.Add(cgroups.Process{ + Pid: os.Getpid(), + }); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/server/server_solaris.go b/vendor/github.com/containerd/containerd/services/server/server_solaris.go new file mode 100644 index 0000000000000..f3182211f652e --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/server/server_solaris.go @@ -0,0 +1,27 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "context" + + srvconfig "github.com/containerd/containerd/server/config" +) + +func apply(_ context.Context, _ *srvconfig.Config) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/server/server_unsupported.go b/vendor/github.com/containerd/containerd/services/server/server_unsupported.go new file mode 100644 index 0000000000000..a6f1876510cfe --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/server/server_unsupported.go @@ -0,0 +1,29 @@ +// +build !linux,!windows,!solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package server + +import ( + "context" + + srvconfig "github.com/containerd/containerd/services/server/config" +) + +func apply(_ context.Context, _ *srvconfig.Config) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/server/server_windows.go b/vendor/github.com/containerd/containerd/services/server/server_windows.go new file mode 100644 index 0000000000000..e0dd19b1d25a8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/server/server_windows.go @@ -0,0 +1,29 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "context" + + srvconfig "github.com/containerd/containerd/services/server/config" +) + +func apply(_ context.Context, _ *srvconfig.Config) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/snapshots/service.go b/vendor/github.com/containerd/containerd/services/snapshots/service.go new file mode 100644 index 0000000000000..8ef7a47f562ab --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/snapshots/service.go @@ -0,0 +1,317 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package snapshots + +import ( + "context" + + snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + "github.com/containerd/containerd/snapshots" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "snapshots", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: newService, + }) +} + +var empty = &ptypes.Empty{} + +type service struct { + ss map[string]snapshots.Snapshotter +} + +func newService(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.SnapshotsService] + if !ok { + return nil, errors.New("snapshots service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + ss := i.(map[string]snapshots.Snapshotter) + return &service{ss: ss}, nil +} + +func (s *service) getSnapshotter(name string) (snapshots.Snapshotter, error) { + if name == "" { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing") + } + + sn := s.ss[name] + if sn == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name) + } + return sn, nil 
+} + +func (s *service) Register(gs *grpc.Server) error { + snapshotsapi.RegisterSnapshotsServer(gs, s) + return nil +} + +func (s *service) Prepare(ctx context.Context, pr *snapshotsapi.PrepareSnapshotRequest) (*snapshotsapi.PrepareSnapshotResponse, error) { + log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("prepare snapshot") + sn, err := s.getSnapshotter(pr.Snapshotter) + if err != nil { + return nil, err + } + + var opts []snapshots.Opt + if pr.Labels != nil { + opts = append(opts, snapshots.WithLabels(pr.Labels)) + } + mounts, err := sn.Prepare(ctx, pr.Key, pr.Parent, opts...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &snapshotsapi.PrepareSnapshotResponse{ + Mounts: fromMounts(mounts), + }, nil +} + +func (s *service) View(ctx context.Context, pr *snapshotsapi.ViewSnapshotRequest) (*snapshotsapi.ViewSnapshotResponse, error) { + log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("prepare view snapshot") + sn, err := s.getSnapshotter(pr.Snapshotter) + if err != nil { + return nil, err + } + var opts []snapshots.Opt + if pr.Labels != nil { + opts = append(opts, snapshots.WithLabels(pr.Labels)) + } + mounts, err := sn.View(ctx, pr.Key, pr.Parent, opts...) 
+ if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &snapshotsapi.ViewSnapshotResponse{ + Mounts: fromMounts(mounts), + }, nil +} + +func (s *service) Mounts(ctx context.Context, mr *snapshotsapi.MountsRequest) (*snapshotsapi.MountsResponse, error) { + log.G(ctx).WithField("key", mr.Key).Debugf("get snapshot mounts") + sn, err := s.getSnapshotter(mr.Snapshotter) + if err != nil { + return nil, err + } + + mounts, err := sn.Mounts(ctx, mr.Key) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &snapshotsapi.MountsResponse{ + Mounts: fromMounts(mounts), + }, nil +} + +func (s *service) Commit(ctx context.Context, cr *snapshotsapi.CommitSnapshotRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("commit snapshot") + sn, err := s.getSnapshotter(cr.Snapshotter) + if err != nil { + return nil, err + } + + var opts []snapshots.Opt + if cr.Labels != nil { + opts = append(opts, snapshots.WithLabels(cr.Labels)) + } + if err := sn.Commit(ctx, cr.Name, cr.Key, opts...); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return empty, nil +} + +func (s *service) Remove(ctx context.Context, rr *snapshotsapi.RemoveSnapshotRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("key", rr.Key).Debugf("remove snapshot") + sn, err := s.getSnapshotter(rr.Snapshotter) + if err != nil { + return nil, err + } + + if err := sn.Remove(ctx, rr.Key); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return empty, nil +} + +func (s *service) Stat(ctx context.Context, sr *snapshotsapi.StatSnapshotRequest) (*snapshotsapi.StatSnapshotResponse, error) { + log.G(ctx).WithField("key", sr.Key).Debugf("stat snapshot") + sn, err := s.getSnapshotter(sr.Snapshotter) + if err != nil { + return nil, err + } + + info, err := sn.Stat(ctx, sr.Key) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &snapshotsapi.StatSnapshotResponse{Info: fromInfo(info)}, nil +} + +func (s *service) Update(ctx 
context.Context, sr *snapshotsapi.UpdateSnapshotRequest) (*snapshotsapi.UpdateSnapshotResponse, error) { + log.G(ctx).WithField("key", sr.Info.Name).Debugf("update snapshot") + sn, err := s.getSnapshotter(sr.Snapshotter) + if err != nil { + return nil, err + } + + info, err := sn.Update(ctx, toInfo(sr.Info), sr.UpdateMask.GetPaths()...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &snapshotsapi.UpdateSnapshotResponse{Info: fromInfo(info)}, nil +} + +func (s *service) List(sr *snapshotsapi.ListSnapshotsRequest, ss snapshotsapi.Snapshots_ListServer) error { + sn, err := s.getSnapshotter(sr.Snapshotter) + if err != nil { + return err + } + + var ( + buffer []snapshotsapi.Info + sendBlock = func(block []snapshotsapi.Info) error { + return ss.Send(&snapshotsapi.ListSnapshotsResponse{ + Info: block, + }) + } + ) + err = sn.Walk(ss.Context(), func(ctx context.Context, info snapshots.Info) error { + buffer = append(buffer, fromInfo(info)) + + if len(buffer) >= 100 { + if err := sendBlock(buffer); err != nil { + return err + } + + buffer = buffer[:0] + } + + return nil + }) + if err != nil { + return err + } + if len(buffer) > 0 { + // Send remaining infos + if err := sendBlock(buffer); err != nil { + return err + } + } + + return nil +} + +func (s *service) Usage(ctx context.Context, ur *snapshotsapi.UsageRequest) (*snapshotsapi.UsageResponse, error) { + sn, err := s.getSnapshotter(ur.Snapshotter) + if err != nil { + return nil, err + } + + usage, err := sn.Usage(ctx, ur.Key) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return fromUsage(usage), nil +} + +func fromKind(kind snapshots.Kind) snapshotsapi.Kind { + if kind == snapshots.KindActive { + return snapshotsapi.KindActive + } + if kind == snapshots.KindView { + return snapshotsapi.KindView + } + return snapshotsapi.KindCommitted +} + +func fromInfo(info snapshots.Info) snapshotsapi.Info { + return snapshotsapi.Info{ + Name: info.Name, + Parent: info.Parent, + Kind: 
fromKind(info.Kind), + CreatedAt: info.Created, + UpdatedAt: info.Updated, + Labels: info.Labels, + } +} + +func fromUsage(usage snapshots.Usage) *snapshotsapi.UsageResponse { + return &snapshotsapi.UsageResponse{ + Inodes: usage.Inodes, + Size_: usage.Size, + } +} + +func fromMounts(mounts []mount.Mount) []*types.Mount { + out := make([]*types.Mount, len(mounts)) + for i, m := range mounts { + out[i] = &types.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + } + } + return out +} + +func toInfo(info snapshotsapi.Info) snapshots.Info { + return snapshots.Info{ + Name: info.Name, + Parent: info.Parent, + Kind: toKind(info.Kind), + Created: info.CreatedAt, + Updated: info.UpdatedAt, + Labels: info.Labels, + } +} + +func toKind(kind snapshotsapi.Kind) snapshots.Kind { + if kind == snapshotsapi.KindActive { + return snapshots.KindActive + } + if kind == snapshotsapi.KindView { + return snapshots.KindView + } + return snapshots.KindCommitted +} diff --git a/vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go b/vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go new file mode 100644 index 0000000000000..5da3651109214 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go @@ -0,0 +1,98 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package snapshots + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + "github.com/containerd/containerd/snapshots" +) + +// snapshotter wraps snapshots.Snapshotter with proper events published. +type snapshotter struct { + snapshots.Snapshotter + publisher events.Publisher +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.SnapshotsService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + db := m.(*metadata.DB) + ss := make(map[string]snapshots.Snapshotter) + for n, sn := range db.Snapshotters() { + ss[n] = newSnapshotter(sn, ic.Events) + } + return ss, nil + }, + }) +} + +func newSnapshotter(sn snapshots.Snapshotter, publisher events.Publisher) snapshots.Snapshotter { + return &snapshotter{ + Snapshotter: sn, + publisher: publisher, + } +} + +func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + mounts, err := s.Snapshotter.Prepare(ctx, key, parent, opts...) 
+ if err != nil { + return nil, err + } + if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventstypes.SnapshotPrepare{ + Key: key, + Parent: parent, + }); err != nil { + return nil, err + } + return mounts, nil +} + +func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + if err := s.Snapshotter.Commit(ctx, name, key, opts...); err != nil { + return err + } + return s.publisher.Publish(ctx, "/snapshot/commit", &eventstypes.SnapshotCommit{ + Key: key, + Name: name, + }) +} + +func (s *snapshotter) Remove(ctx context.Context, key string) error { + if err := s.Snapshotter.Remove(ctx, key); err != nil { + return err + } + return s.publisher.Publish(ctx, "/snapshot/remove", &eventstypes.SnapshotRemove{ + Key: key, + }) +} diff --git a/vendor/github.com/containerd/containerd/snapshots/native/native.go b/vendor/github.com/containerd/containerd/snapshots/native/native.go new file mode 100644 index 0000000000000..5532ea66d5549 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/native/native.go @@ -0,0 +1,348 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package native + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/storage" + + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.SnapshotPlugin, + ID: "native", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + return NewSnapshotter(ic.Root) + }, + }) +} + +type snapshotter struct { + root string + ms *storage.MetaStore +} + +// NewSnapshotter returns a Snapshotter which copies layers on the underlying +// file system. A metadata file is stored under the root. +func NewSnapshotter(root string) (snapshots.Snapshotter, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + + if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + return &snapshotter{ + root: root, + ms: ms, + }, nil +} + +// Stat returns the info for an active or committed snapshot by name or +// key. +// +// Should be used for parent resolution, existence checks and to discern +// the kind of snapshot. 
+func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return snapshots.Info{}, err + } + defer t.Rollback() + _, info, _, err := storage.GetInfo(ctx, key) + if err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return snapshots.Info{}, err + } + + info, err = storage.UpdateInfo(ctx, info, fieldpaths...) + if err != nil { + t.Rollback() + return snapshots.Info{}, err + } + + if err := t.Commit(); err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return snapshots.Usage{}, err + } + defer t.Rollback() + + id, info, usage, err := storage.GetInfo(ctx, key) + if err != nil { + return snapshots.Usage{}, err + } + + if info.Kind == snapshots.KindActive { + du, err := fs.DiskUsage(ctx, o.getSnapshotDir(id)) + if err != nil { + return snapshots.Usage{}, err + } + usage = snapshots.Usage(du) + } + + return usage, nil +} + +func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) +} + +func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts) +} + +// Mounts returns the mounts for the transaction identified by key. Can be +// called on an read-write or readonly transaction. +// +// This can be used to recover mounts after calling View or Prepare. 
+func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return nil, err + } + s, err := storage.GetSnapshot(ctx, key) + t.Rollback() + if err != nil { + return nil, errors.Wrap(err, "failed to get snapshot mount") + } + return o.mounts(s), nil +} + +func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return err + } + + id, _, _, err := storage.GetInfo(ctx, key) + if err != nil { + return err + } + + usage, err := fs.DiskUsage(ctx, o.getSnapshotDir(id)) + if err != nil { + return err + } + + if _, err := storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return errors.Wrap(err, "failed to commit snapshot") + } + return t.Commit() +} + +// Remove abandons the transaction identified by key. All resources +// associated with the key will be removed. 
+func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return err + } + defer func() { + if err != nil && t != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + id, _, err := storage.Remove(ctx, key) + if err != nil { + return errors.Wrap(err, "failed to remove") + } + + path := o.getSnapshotDir(id) + renamed := filepath.Join(o.root, "snapshots", "rm-"+id) + if err := os.Rename(path, renamed); err != nil { + if !os.IsNotExist(err) { + return errors.Wrap(err, "failed to rename") + } + renamed = "" + } + + err = t.Commit() + t = nil + if err != nil { + if renamed != "" { + if err1 := os.Rename(renamed, path); err1 != nil { + // May cause inconsistent data on disk + log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("failed to rename after failed commit") + } + } + return errors.Wrap(err, "failed to commit") + } + if renamed != "" { + if err := os.RemoveAll(renamed); err != nil { + // Must be cleaned up, any "rm-*" could be removed if no active transactions + log.G(ctx).WithError(err).WithField("path", renamed).Warnf("failed to remove root filesystem") + } + } + + return nil +} + +// Walk the committed snapshots. 
+func (o *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return err + } + defer t.Rollback() + return storage.WalkInfo(ctx, fn) +} + +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { + var ( + path, td string + ) + + if kind == snapshots.KindActive || parent == "" { + td, err = ioutil.TempDir(filepath.Join(o.root, "snapshots"), "new-") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp dir") + } + if err := os.Chmod(td, 0755); err != nil { + return nil, errors.Wrapf(err, "failed to chmod %s to 0755", td) + } + defer func() { + if err != nil { + if td != "" { + if err1 := os.RemoveAll(td); err1 != nil { + err = errors.Wrapf(err, "remove failed: %v", err1) + } + } + if path != "" { + if err1 := os.RemoveAll(path); err1 != nil { + err = errors.Wrapf(err, "failed to remove path: %v", err1) + } + } + } + }() + } + + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return nil, err + } + + s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
+ if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, errors.Wrap(err, "failed to create snapshot") + } + + if td != "" { + if len(s.ParentIDs) > 0 { + parent := o.getSnapshotDir(s.ParentIDs[0]) + if err := fs.CopyDir(td, parent); err != nil { + return nil, errors.Wrap(err, "copying of parent failed") + } + } + + path = o.getSnapshotDir(s.ID) + if err := os.Rename(td, path); err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, errors.Wrap(err, "failed to rename") + } + td = "" + } + + if err := t.Commit(); err != nil { + return nil, errors.Wrap(err, "commit failed") + } + + return o.mounts(s), nil +} + +func (o *snapshotter) getSnapshotDir(id string) string { + return filepath.Join(o.root, "snapshots", id) +} + +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { + var ( + roFlag string + source string + ) + + if s.Kind == snapshots.KindView { + roFlag = "ro" + } else { + roFlag = "rw" + } + + if len(s.ParentIDs) == 0 || s.Kind == snapshots.KindActive { + source = o.getSnapshotDir(s.ID) + } else { + source = o.getSnapshotDir(s.ParentIDs[0]) + } + + return []mount.Mount{ + { + Source: source, + Type: "bind", + Options: []string{ + roFlag, + "rbind", + }, + }, + } +} + +// Close closes the snapshotter +func (o *snapshotter) Close() error { + return o.ms.Close() +} diff --git a/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go b/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go new file mode 100644 index 0000000000000..7716a591e00d4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go @@ -0,0 +1,606 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package storage + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +var ( + bucketKeyStorageVersion = []byte("v1") + bucketKeySnapshot = []byte("snapshots") + bucketKeyParents = []byte("parents") + + bucketKeyID = []byte("id") + bucketKeyParent = []byte("parent") + bucketKeyKind = []byte("kind") + bucketKeyInodes = []byte("inodes") + bucketKeySize = []byte("size") + + // ErrNoTransaction is returned when an operation is attempted with + // a context which is not inside of a transaction. + ErrNoTransaction = errors.New("no transaction in context") +) + +// parentKey returns a composite key of the parent and child identifiers. The +// parts of the key are separated by a zero byte. +func parentKey(parent, child uint64) []byte { + b := make([]byte, binary.Size([]uint64{parent, child})+1) + i := binary.PutUvarint(b, parent) + j := binary.PutUvarint(b[i+1:], child) + return b[0 : i+j+1] +} + +// parentPrefixKey returns the parent part of the composite key with the +// zero byte separator. +func parentPrefixKey(parent uint64) []byte { + b := make([]byte, binary.Size(parent)+1) + i := binary.PutUvarint(b, parent) + return b[0 : i+1] +} + +// getParentPrefix returns the first part of the composite key which +// represents the parent identifier. 
+func getParentPrefix(b []byte) uint64 { + parent, _ := binary.Uvarint(b) + return parent +} + +// GetInfo returns the snapshot Info directly from the metadata. Requires a +// context with a storage transaction. +func GetInfo(ctx context.Context, key string) (string, snapshots.Info, snapshots.Usage, error) { + var ( + id uint64 + su snapshots.Usage + si = snapshots.Info{ + Name: key, + } + ) + err := withSnapshotBucket(ctx, key, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + getUsage(bkt, &su) + return readSnapshot(bkt, &id, &si) + }) + if err != nil { + return "", snapshots.Info{}, snapshots.Usage{}, err + } + + return fmt.Sprintf("%d", id), si, su, nil +} + +// UpdateInfo updates an existing snapshot info's data +func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + updated := snapshots.Info{ + Name: info.Name, + } + err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(info.Name)) + if sbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + } + if err := readSnapshot(sbkt, nil, &updated); err != nil { + return err + } + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + updated.Labels = info.Labels + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name) + } + } + } else { + // Set mutable fields + updated.Labels = info.Labels + } + updated.Updated = time.Now().UTC() + if err := boltutil.WriteTimestamps(sbkt, updated.Created, updated.Updated); err != nil { + return err + } + + return boltutil.WriteLabels(sbkt, updated.Labels) + }) + if err != nil { + return snapshots.Info{}, err + } + return 
updated, nil +} + +// WalkInfo iterates through all metadata Info for the stored snapshots and +// calls the provided function for each. Requires a context with a storage +// transaction. +func WalkInfo(ctx context.Context, fn func(context.Context, snapshots.Info) error) error { + return withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + return bkt.ForEach(func(k, v []byte) error { + // skip non buckets + if v != nil { + return nil + } + var ( + sbkt = bkt.Bucket(k) + si = snapshots.Info{ + Name: string(k), + } + ) + if err := readSnapshot(sbkt, nil, &si); err != nil { + return err + } + + return fn(ctx, si) + }) + }) +} + +// GetSnapshot returns the metadata for the active or view snapshot transaction +// referenced by the given key. Requires a context with a storage transaction. +func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) { + err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + } + + s.ID = fmt.Sprintf("%d", readID(sbkt)) + s.Kind = readKind(sbkt) + + if s.Kind != snapshots.KindActive && s.Kind != snapshots.KindView { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "requested snapshot %v not active or view", key) + } + + if parentKey := sbkt.Get(bucketKeyParent); len(parentKey) > 0 { + spbkt := bkt.Bucket(parentKey) + if spbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "parent does not exist") + } + + s.ParentIDs, err = parents(bkt, spbkt, readID(spbkt)) + if err != nil { + return errors.Wrap(err, "failed to get parent chain") + } + } + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// CreateSnapshot inserts a record for an active or view snapshot with the provided parent. 
+func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) (s Snapshot, err error) { + switch kind { + case snapshots.KindActive, snapshots.KindView: + default: + return Snapshot{}, errors.Wrapf(errdefs.ErrInvalidArgument, "snapshot type %v invalid; only snapshots of type Active or View can be created", kind) + } + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return Snapshot{}, err + } + } + + err = createBucketIfNotExists(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + var ( + spbkt *bolt.Bucket + ) + if parent != "" { + spbkt = bkt.Bucket([]byte(parent)) + if spbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q bucket", parent) + } + + if readKind(spbkt) != snapshots.KindCommitted { + return errors.Wrapf(errdefs.ErrInvalidArgument, "parent %q is not committed snapshot", parent) + } + } + sbkt, err := bkt.CreateBucket([]byte(key)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v", key) + } + return err + } + + id, err := bkt.NextSequence() + if err != nil { + return errors.Wrapf(err, "unable to get identifier for snapshot %q", key) + } + + t := time.Now().UTC() + si := snapshots.Info{ + Parent: parent, + Kind: kind, + Labels: base.Labels, + Created: t, + Updated: t, + } + if err := putSnapshot(sbkt, id, si); err != nil { + return err + } + + if spbkt != nil { + pid := readID(spbkt) + + // Store a backlink from the key to the parent. Store the snapshot name + // as the value to allow following the backlink to the snapshot value. 
+ if err := pbkt.Put(parentKey(pid, id), []byte(key)); err != nil { + return errors.Wrapf(err, "failed to write parent link for snapshot %q", key) + } + + s.ParentIDs, err = parents(bkt, spbkt, pid) + if err != nil { + return errors.Wrapf(err, "failed to get parent chain for snapshot %q", key) + } + } + + s.ID = fmt.Sprintf("%d", id) + s.Kind = kind + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// Remove removes a snapshot from the metastore. The string identifier for the +// snapshot is returned as well as the kind. The provided context must contain a +// writable transaction. +func Remove(ctx context.Context, key string) (string, snapshots.Kind, error) { + var ( + id uint64 + si snapshots.Info + ) + + if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key) + } + + if err := readSnapshot(sbkt, &id, &si); err != nil { + errors.Wrapf(err, "failed to read snapshot %s", key) + } + + if pbkt != nil { + k, _ := pbkt.Cursor().Seek(parentPrefixKey(id)) + if getParentPrefix(k) == id { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child") + } + + if si.Parent != "" { + spbkt := bkt.Bucket([]byte(si.Parent)) + if spbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key) + } + + if err := pbkt.Delete(parentKey(readID(spbkt), id)); err != nil { + return errors.Wrap(err, "failed to delete parent link") + } + } + } + + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return errors.Wrap(err, "failed to delete snapshot") + } + + return nil + }); err != nil { + return "", 0, err + } + + return fmt.Sprintf("%d", id), si.Kind, nil +} + +// CommitActive renames the active snapshot transaction referenced by `key` +// as a committed snapshot referenced by `Name`. The resulting snapshot will be +// committed and readonly. 
The `key` reference will no longer be available for +// lookup or removal. The returned string identifier for the committed snapshot +// is the same identifier of the original active snapshot. The provided context +// must contain a writable transaction. +func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, opts ...snapshots.Opt) (string, error) { + var ( + id uint64 + base snapshots.Info + ) + for _, opt := range opts { + if err := opt(&base); err != nil { + return "", err + } + } + + if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + dbkt, err := bkt.CreateBucket([]byte(name)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errdefs.ErrAlreadyExists + } + return errors.Wrapf(err, "committed snapshot %v", name) + } + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "failed to get active snapshot %q", key) + } + + var si snapshots.Info + if err := readSnapshot(sbkt, &id, &si); err != nil { + return errors.Wrapf(err, "failed to read active snapshot %q", key) + } + + if si.Kind != snapshots.KindActive { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "snapshot %q is not active", key) + } + si.Kind = snapshots.KindCommitted + si.Created = time.Now().UTC() + si.Updated = si.Created + + // Replace labels, do not inherit + si.Labels = base.Labels + + if err := putSnapshot(dbkt, id, si); err != nil { + return err + } + if err := putUsage(dbkt, usage); err != nil { + return err + } + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return errors.Wrapf(err, "failed to delete active snapshot %q", key) + } + if si.Parent != "" { + spbkt := bkt.Bucket([]byte(si.Parent)) + if spbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q of snapshot %q", si.Parent, key) + } + pid := readID(spbkt) + + // Updates parent back link to use new key + if err := pbkt.Put(parentKey(pid, id), []byte(name)); err != nil { + return 
errors.Wrapf(err, "failed to update parent link %q from %q to %q", pid, key, name) + } + } + + return nil + }); err != nil { + return "", err + } + + return fmt.Sprintf("%d", id), nil +} + +// IDMap returns all the IDs mapped to their key +func IDMap(ctx context.Context) (map[string]string, error) { + m := map[string]string{} + if err := withBucket(ctx, func(ctx context.Context, bkt, _ *bolt.Bucket) error { + return bkt.ForEach(func(k, v []byte) error { + // skip non buckets + if v != nil { + return nil + } + id := readID(bkt.Bucket(k)) + m[fmt.Sprintf("%d", id)] = string(k) + return nil + }) + }); err != nil { + return nil, err + } + + return m, nil +} + +func withSnapshotBucket(ctx context.Context, key string, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + vbkt := tx.Bucket(bucketKeyStorageVersion) + if vbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist") + } + bkt := vbkt.Bucket(bucketKeySnapshot) + if bkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshots bucket does not exist") + } + bkt = bkt.Bucket([]byte(key)) + if bkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + } + + return fn(ctx, bkt, vbkt.Bucket(bucketKeyParents)) +} + +func withBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + bkt := tx.Bucket(bucketKeyStorageVersion) + if bkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist") + } + return fn(ctx, bkt.Bucket(bucketKeySnapshot), bkt.Bucket(bucketKeyParents)) +} + +func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + + bkt, err := 
tx.CreateBucketIfNotExists(bucketKeyStorageVersion) + if err != nil { + return errors.Wrap(err, "failed to create version bucket") + } + sbkt, err := bkt.CreateBucketIfNotExists(bucketKeySnapshot) + if err != nil { + return errors.Wrap(err, "failed to create snapshots bucket") + } + pbkt, err := bkt.CreateBucketIfNotExists(bucketKeyParents) + if err != nil { + return errors.Wrap(err, "failed to create parents bucket") + } + return fn(ctx, sbkt, pbkt) +} + +func parents(bkt, pbkt *bolt.Bucket, parent uint64) (parents []string, err error) { + for { + parents = append(parents, fmt.Sprintf("%d", parent)) + + parentKey := pbkt.Get(bucketKeyParent) + if len(parentKey) == 0 { + return + } + pbkt = bkt.Bucket(parentKey) + if pbkt == nil { + return nil, errors.Wrap(errdefs.ErrNotFound, "missing parent") + } + + parent = readID(pbkt) + } +} + +func readKind(bkt *bolt.Bucket) (k snapshots.Kind) { + kind := bkt.Get(bucketKeyKind) + if len(kind) == 1 { + k = snapshots.Kind(kind[0]) + } + return +} + +func readID(bkt *bolt.Bucket) uint64 { + id, _ := binary.Uvarint(bkt.Get(bucketKeyID)) + return id +} + +func readSnapshot(bkt *bolt.Bucket, id *uint64, si *snapshots.Info) error { + if id != nil { + *id = readID(bkt) + } + if si != nil { + si.Kind = readKind(bkt) + si.Parent = string(bkt.Get(bucketKeyParent)) + + if err := boltutil.ReadTimestamps(bkt, &si.Created, &si.Updated); err != nil { + return err + } + + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + si.Labels = labels + } + + return nil +} + +func putSnapshot(bkt *bolt.Bucket, id uint64, si snapshots.Info) error { + idEncoded, err := encodeID(id) + if err != nil { + return err + } + + updates := [][2][]byte{ + {bucketKeyID, idEncoded}, + {bucketKeyKind, []byte{byte(si.Kind)}}, + } + if si.Parent != "" { + updates = append(updates, [2][]byte{bucketKeyParent, []byte(si.Parent)}) + } + for _, v := range updates { + if err := bkt.Put(v[0], v[1]); err != nil { + return err + } + } + if err := 
boltutil.WriteTimestamps(bkt, si.Created, si.Updated); err != nil { + return err + } + return boltutil.WriteLabels(bkt, si.Labels) +} + +func getUsage(bkt *bolt.Bucket, usage *snapshots.Usage) { + usage.Inodes, _ = binary.Varint(bkt.Get(bucketKeyInodes)) + usage.Size, _ = binary.Varint(bkt.Get(bucketKeySize)) +} + +func putUsage(bkt *bolt.Bucket, usage snapshots.Usage) error { + for _, v := range []struct { + key []byte + value int64 + }{ + {bucketKeyInodes, usage.Inodes}, + {bucketKeySize, usage.Size}, + } { + e, err := encodeSize(v.value) + if err != nil { + return err + } + if err := bkt.Put(v.key, e); err != nil { + return err + } + } + return nil +} + +func encodeSize(size int64) ([]byte, error) { + var ( + buf [binary.MaxVarintLen64]byte + sizeEncoded = buf[:] + ) + sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, size)] + + if len(sizeEncoded) == 0 { + return nil, fmt.Errorf("failed encoding size = %v", size) + } + return sizeEncoded, nil +} + +func encodeID(id uint64) ([]byte, error) { + var ( + buf [binary.MaxVarintLen64]byte + idEncoded = buf[:] + ) + idEncoded = idEncoded[:binary.PutUvarint(idEncoded, id)] + + if len(idEncoded) == 0 { + return nil, fmt.Errorf("failed encoding id = %v", id) + } + return idEncoded, nil +} diff --git a/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go b/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go new file mode 100644 index 0000000000000..69ba3ea96bbc1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go @@ -0,0 +1,115 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package storage provides a metadata storage implementation for snapshot +// drivers. Drive implementations are responsible for starting and managing +// transactions using the defined context creator. This storage package uses +// BoltDB for storing metadata. Access to the raw boltdb transaction is not +// provided, but the stored object is provided by the proto subpackage. +package storage + +import ( + "context" + "sync" + + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +// Transactor is used to finalize an active transaction. +type Transactor interface { + // Commit commits any changes made during the transaction. On error a + // caller is expected to clean up any resources which would have relied + // on data mutated as part of this transaction. Only writable + // transactions can commit, non-writable must call Rollback. + Commit() error + + // Rollback rolls back any changes made during the transaction. This + // must be called on all non-writable transactions and aborted writable + // transaction. + Rollback() error +} + +// Snapshot hold the metadata for an active or view snapshot transaction. The +// ParentIDs hold the snapshot identifiers for the committed snapshots this +// active or view is based on. The ParentIDs are ordered from the lowest base +// to highest, meaning they should be applied in order from the first index to +// the last index. The last index should always be considered the active +// snapshots immediate parent. 
+type Snapshot struct { + Kind snapshots.Kind + ID string + ParentIDs []string +} + +// MetaStore is used to store metadata related to a snapshot driver. The +// MetaStore is intended to store metadata related to name, state and +// parentage. Using the MetaStore is not required to implement a snapshot +// driver but can be used to handle the persistence and transactional +// complexities of a driver implementation. +type MetaStore struct { + dbfile string + + dbL sync.Mutex + db *bolt.DB +} + +// NewMetaStore returns a snapshot MetaStore for storage of metadata related to +// a snapshot driver backed by a bolt file database. This implementation is +// strongly consistent and does all metadata changes in a transaction to prevent +// against process crashes causing inconsistent metadata state. +func NewMetaStore(dbfile string) (*MetaStore, error) { + return &MetaStore{ + dbfile: dbfile, + }, nil +} + +type transactionKey struct{} + +// TransactionContext creates a new transaction context. The writable value +// should be set to true for transactions which are expected to mutate data. 
+func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (context.Context, Transactor, error) { + ms.dbL.Lock() + if ms.db == nil { + db, err := bolt.Open(ms.dbfile, 0600, nil) + if err != nil { + ms.dbL.Unlock() + return ctx, nil, errors.Wrap(err, "failed to open database file") + } + ms.db = db + } + ms.dbL.Unlock() + + tx, err := ms.db.Begin(writable) + if err != nil { + return ctx, nil, errors.Wrap(err, "failed to start transaction") + } + + ctx = context.WithValue(ctx, transactionKey{}, tx) + + return ctx, tx, nil +} + +// Close closes the metastore and any underlying database connections +func (ms *MetaStore) Close() error { + ms.dbL.Lock() + defer ms.dbL.Unlock() + if ms.db == nil { + return nil + } + return ms.db.Close() +} From 07decdbf0c4f3336cacdc2e3d8649302511562c2 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 13 Mar 2019 15:55:09 -0700 Subject: [PATCH 34/73] Add delete image unit tests Signed-off-by: Derek McGowan --- daemon/images/generators_test.go | 25 +++ daemon/images/image_delete_test.go | 245 +++++++++++++++++++++++++++++ daemon/images/image_events.go | 4 + daemon/images/images_test.go | 34 +--- daemon/images/service_test.go | 2 +- 5 files changed, 278 insertions(+), 32 deletions(-) create mode 100644 daemon/images/image_delete_test.go diff --git a/daemon/images/generators_test.go b/daemon/images/generators_test.go index ebbcae6143b6c..33e484b5e1f09 100644 --- a/daemon/images/generators_test.go +++ b/daemon/images/generators_test.go @@ -4,7 +4,10 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" + "math/rand" + "time" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/archive/tartest" @@ -183,3 +186,25 @@ func createIndex(references ...construct) construct { } } + +func randomLayer(size int) tartest.WriterToTar { + now := time.Now() + tc := tartest.TarContext{}.WithModTime(now.UTC()) + r := rand.New(rand.NewSource(now.UnixNano())) + p := make([]byte, size) + if l, err 
:= r.Read(p); err != nil || l != size { + panic(fmt.Sprintf("unable to read rand bytes: %d %v", l, err)) + } + return tartest.TarAll( + tc.Dir("/randomfiles", 0755), + tc.File("/randomfiles/1", p, 0644), + ) +} + +func randomManifest(layers int) construct { + layerOpts := make([]tartest.WriterToTar, layers) + for i := range layerOpts { + layerOpts[i] = randomLayer(10 * i) + } + return createManifest(withLayers(layerOpts...), withConfig()) +} diff --git a/daemon/images/image_delete_test.go b/daemon/images/image_delete_test.go new file mode 100644 index 0000000000000..d91ff94a90b73 --- /dev/null +++ b/daemon/images/image_delete_test.go @@ -0,0 +1,245 @@ +package images + +import ( + "context" + "strconv" + "strings" + "testing" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/docker/docker/container" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { + type testImage struct { + names []string + image construct + // TODO(containerd): parent index + + expected []string + deleted bool + } + + type testDelete struct { + ref string + id int // index of image to delete, if ref is empty + force bool + prune bool + untagged []string + deleted []int // indexs of images deleted + } + + type testCase struct { + name string + images []testImage + deletes []testDelete + containers []*container.Container + } + + for _, tc := range []testCase{ + { + name: "RemoveSingleTags", + images: []testImage{ + { + names: []string{"docker.io/library/img1:latest"}, + image: randomManifest(1), + deleted: true, + }, + { + names: []string{"docker.io/library/img2:latest"}, + image: randomManifest(2), + expected: []string{"docker.io/library/img2:latest"}, + }, + { + names: []string{"docker.io/library/img3:latest", "docker.io/library/img4:latest"}, + image: randomManifest(3), + expected: 
 []string{"docker.io/library/img4:latest"}, + }, + }, + deletes: []testDelete{ + { + ref: "img1:latest", + untagged: []string{"img1:latest", "img1:latest@0"}, + deleted: []int{0}, + }, + { + ref: "img3:latest", + untagged: []string{"img3:latest", "img3:latest@2"}, + }, + }, + }, + } { + ctx, cleanup, err := is.client.WithLease(ctx) + if err != nil { + t.Fatal(err) + } + var created []string + t.Run(tc.name, func(t *testing.T) { + var imgs []ocispec.Descriptor + deleted := map[digest.Digest]bool{} + expected := map[string]*ocispec.Descriptor{} + + cis := is.client.ImageService() + for i, imagec := range tc.images { + var desc ocispec.Descriptor + if err := imagec.image(&desc)(ctx, is.client.ContentStore()); err != nil { + t.Fatal(err) + } + + for _, name := range imagec.names { + img := images.Image{ + Name: name, + Target: desc, + } + _, err = cis.Create(ctx, img) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + t.Fatal(err) + } + if _, err := cis.Update(ctx, img); err != nil { + t.Fatal(err) + } + } else { + created = append(created, img.Name) + expected[img.Name] = nil + } + + img.Name = img.Name + "@" + desc.Digest.String() + _, err = cis.Create(ctx, img) + if err != nil { + t.Fatal(err) + } + created = append(created, img.Name) + expected[img.Name] = nil + } + + if imagec.deleted { + deleted[desc.Digest] = true + } + for _, tag := range imagec.expected { + expected[tag] = &desc + expected[tag+"@"+desc.Digest.String()] = &desc + } + + // TODO(containerd): Unpack image and store layer + + // TODO(containerd): Set parent + + imgs = append(imgs, desc) + t.Logf("Image %d: %s", i, desc.Digest.String()) + } + + is.containers = mockContainerStore{tc.containers} + for _, del := range tc.deletes { + ref := del.ref + if ref == "" { + ref = imgs[del.id].Digest.String() + } + items, err := is.ImageDelete(ctx, ref, del.force, del.prune) + if err != nil { + t.Fatal(err) + } + if expected := len(del.deleted) + len(del.untagged); len(items) != expected { + 
t.Errorf("Wrong number of items: expected %d, actual %d", expected, len(items)) + } else { + untags := map[string]struct{}{} + for _, ut := range del.untagged { + untags[formatTag(ut, imgs)] = struct{}{} + } + deletes := map[string]struct{}{} + for _, idx := range del.deleted { + deletes[imgs[idx].Digest.String()] = struct{}{} + } + for _, item := range items { + if item.Deleted != "" { + if _, ok := deletes[item.Deleted]; !ok { + t.Errorf("Unexpected delete: %s", item.Deleted) + } + } + if item.Untagged != "" { + if _, ok := untags[item.Untagged]; !ok { + t.Errorf("Unexpected untag: %s", item.Untagged) + } + } + } + } + } + + cs := is.client.ContentStore() + for _, img := range imgs { + _, err := cs.Info(ctx, img.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + t.Fatal(err) + } + if !deleted[img.Digest] { + t.Errorf("Missing image %s", img.Digest) + } + } else if deleted[img.Digest] { + t.Errorf("Expected image %s to be deleted", img.Digest) + } + } + + istore := is.client.ImageService() + for name, desc := range expected { + img, err := istore.Get(ctx, name) + if err != nil { + if !errdefs.IsNotFound(err) { + t.Fatal(err) + } + if desc != nil { + t.Errorf("Missing tag %s", name) + } + } else if desc == nil { + t.Errorf("Expected tag %s to be deleted", name) + } else if desc.Digest != img.Target.Digest { + t.Errorf("Wrong tag for %s: got %s, expected %s", name, img.Target.Digest, desc.Digest) + } + } + + }) + if err := cleanup(ctx); err != nil { + t.Fatal(err) + } + cis := is.client.ImageService() + for i, name := range created { + var opts []images.DeleteOpt + if i == len(created)-1 { + opts = append(opts, images.SynchronousDelete()) + } + if err := cis.Delete(ctx, name, opts...); err != nil && !errdefs.IsNotFound(err) { + t.Fatal(err) + } + } + } +} + +func formatTag(t string, imgs []ocispec.Descriptor) string { + if i := strings.IndexByte(t, '@'); i >= 0 { + idx, err := strconv.Atoi(t[i+1:]) + if err != nil { + panic(err) + } + t = t[:i+1] + 
imgs[idx].Digest.String() + } + return t +} + +type mockContainerStore struct { + containers []*container.Container +} + +func (mockContainerStore) First(container.StoreFilter) *container.Container { + return nil +} + +func (s mockContainerStore) List() []*container.Container { + return s.containers +} + +func (mockContainerStore) Get(string) *container.Container { + return nil +} diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index 755003ddee18d..0d79914ae083c 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -10,6 +10,10 @@ import ( // LogImageEvent generates an event related to an image with only the default attributes. func (i *ImageService) LogImageEvent(ctx context.Context, imageID, refName, action string) { + if i.eventsService == nil { + return + } + // image has not been removed yet. // it could be missing if the event is `delete`. attributes, _ := i.getImageLabels(ctx, imageID) diff --git a/daemon/images/images_test.go b/daemon/images/images_test.go index 5e3acab9a1b7a..68946cb8a6a67 100644 --- a/daemon/images/images_test.go +++ b/daemon/images/images_test.go @@ -5,9 +5,7 @@ import ( "fmt" "reflect" "testing" - "time" - "github.com/containerd/containerd/archive/tartest" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/docker/docker/api/types" @@ -16,8 +14,6 @@ import ( ) func testListImages(ctx context.Context, t *testing.T, is *ImageService) { - tc := tartest.TarContext{}.WithModTime(time.Now().UTC()) - type testImage struct { names []string image construct @@ -84,15 +80,7 @@ func testListImages(ctx context.Context, t *testing.T, is *ImageService) { images: []testImage{ { names: []string{"docker.io/library/someimage:latest"}, - image: createManifest( - withLayers( - tartest.TarAll( - tc.Dir("dummy", 0755), - tc.File("/dummy/file", []byte("unimportant"), 0644), - ), - ), - withConfig(), - ), + image: randomManifest(1), }, }, expected: []imageCheck{ 
@@ -104,27 +92,11 @@ func testListImages(ctx context.Context, t *testing.T, is *ImageService) { images: []testImage{ { names: []string{"docker.io/library/someimage:latest"}, - image: createManifest( - withLayers( - tartest.TarAll( - tc.Dir("dummy", 0755), - tc.File("/dummy/file", []byte("unimportant"), 0644), - ), - ), - withConfig(), - ), + image: randomManifest(1), }, { names: []string{"docker.io/library/someimage:latest"}, - image: createManifest( - withLayers( - tartest.TarAll( - tc.Dir("dummy", 0755), - tc.File("/dummy/file", []byte("updated"), 0644), - ), - ), - withConfig(), - ), + image: randomManifest(2), }, }, expected: []imageCheck{ diff --git a/daemon/images/service_test.go b/daemon/images/service_test.go index 558ec1e3d88a6..ac7ca84d242d1 100644 --- a/daemon/images/service_test.go +++ b/daemon/images/service_test.go @@ -245,5 +245,5 @@ func TestImageService(t *testing.T) { } t.Run("ListImages", setupTest(ctx, td, service, testListImages)) - + t.Run("DeleteImages", setupTest(ctx, td, service, testDeleteImages)) } From e435ef22b210fd926d375615dd93a9dbac18f06a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Sun, 3 Mar 2019 23:34:08 -0800 Subject: [PATCH 35/73] Update rmi to use containerd gc Signed-off-by: Derek McGowan --- daemon/images/generators_test.go | 28 +- daemon/images/image.go | 4 +- daemon/images/image_delete.go | 475 ++++++++++++++--------------- daemon/images/image_delete_test.go | 119 ++++++-- daemon/images/image_pull.go | 5 +- daemon/images/service_test.go | 8 + 6 files changed, 365 insertions(+), 274 deletions(-) diff --git a/daemon/images/generators_test.go b/daemon/images/generators_test.go index 33e484b5e1f09..0111080a7a703 100644 --- a/daemon/images/generators_test.go +++ b/daemon/images/generators_test.go @@ -12,6 +12,7 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/archive/tartest" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/platforms" digest 
"github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/vmihailenco/bufio" @@ -46,7 +47,7 @@ func multiIngest(ingests ...ingest) ingest { } } -func bytesIngest(p []byte, m string) ingest { +func bytesIngest(p []byte, m string, opts ...content.Opt) ingest { desc := ocispec.Descriptor{ MediaType: m, Digest: digest.FromBytes(p), @@ -54,7 +55,7 @@ func bytesIngest(p []byte, m string) ingest { } return func(ctx context.Context, i content.Store) error { - return content.WriteBlob(ctx, i, desc.Digest.String(), bytes.NewReader(p), desc) + return content.WriteBlob(ctx, i, desc.Digest.String(), bytes.NewReader(p), desc, opts...) } } @@ -97,6 +98,7 @@ func withLayers(layers ...tartest.WriterToTar) manifestOpt { if _, err := io.Copy(cw, r); err != nil { return errIngest(err) } + cw.Close() p := br.Bytes() desc := ocispec.Descriptor{ MediaType: ocispec.MediaTypeImageLayerGzip, @@ -115,7 +117,11 @@ func withLayers(layers ...tartest.WriterToTar) manifestOpt { } func createConfig(opts ...configOpt) construct { - var config ocispec.Image + p := platforms.DefaultSpec() + config := ocispec.Image{ + OS: p.OS, + Architecture: p.Architecture, + } for _, opt := range opts { opt(&config) } @@ -157,7 +163,13 @@ func createManifest(opts ...manifestOpt) construct { Digest: digest.FromBytes(p), Size: int64(len(p)), } - return multiIngest(append(ingests, bytesIngest(p, desc.MediaType))...) + labels := map[string]string{ + "containerd.io/gc.ref.content.config": m.Config.Digest.String(), + } + for i, l := range m.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.l%d", i)] = l.Digest.String() + } + return multiIngest(append(ingests, bytesIngest(p, desc.MediaType, content.WithLabels(labels)))...) } } @@ -182,7 +194,11 @@ func createIndex(references ...construct) construct { Digest: digest.FromBytes(p), Size: int64(len(p)), } - return multiIngest(append(ingests, bytesIngest(p, desc.MediaType))...) 
+ labels := map[string]string{} + for i, m := range idx.Manifests { + labels[fmt.Sprintf("containerd.io/gc.ref.content.m%d", i)] = m.Digest.String() + } + return multiIngest(append(ingests, bytesIngest(p, desc.MediaType, content.WithLabels(labels)))...) } } @@ -204,7 +220,7 @@ func randomLayer(size int) tartest.WriterToTar { func randomManifest(layers int) construct { layerOpts := make([]tartest.WriterToTar, layers) for i := range layerOpts { - layerOpts[i] = randomLayer(10 * i) + layerOpts[i] = randomLayer(64 + 10*i) } return createManifest(withLayers(layerOpts...), withConfig()) } diff --git a/daemon/images/image.go b/daemon/images/image.go index 7f65a3eb941e7..c94f41acd6a43 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -25,8 +25,8 @@ const ( LabelImageID = "docker.io/image.id" // LabelImageParent is Docker's parent image ID - // Stored on the image config blob - LabelImageParent = "docker.io/image.parent" + // Stored on the image blob (config or manifest) + LabelImageParent = "containerd.io/gc.ref.content.parent" // LabelImageDangling refers to images with no name // Stored on images and points to the image config digest diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index bae14e9959236..5212adeaec421 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -3,12 +3,14 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" "fmt" - "runtime" "strings" "time" + "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + creference "github.com/containerd/containerd/reference" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/container" @@ -16,17 +18,17 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" digest "github.com/opencontainers/go-digest" + 
ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) type conflictType int const ( - conflictDependentChild conflictType = 1 << iota - conflictRunningContainer + conflictRunningContainer conflictType = 1 << iota conflictActiveReference conflictStoppedContainer - conflictHard = conflictDependentChild | conflictRunningContainer + conflictHard = conflictRunningContainer conflictSoft = conflictActiveReference | conflictStoppedContainer ) @@ -68,27 +70,32 @@ const ( func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { start := time.Now() - img, err := i.getCachedRef(ctx, imageRef) + img, err := i.ResolveImage(ctx, imageRef) if err != nil { return nil, err } - imgID := img.config.Digest.String() - repoRefs := img.references + imgID := img.Digest.String() + // TODO(containerd): Use containerd filter on list containers using := func(c *container.Container) bool { - return digest.Digest(c.ImageID) == img.config.Digest + return digest.Digest(c.ImageID) == img.Digest + } + + is := i.client.ImageService() + imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", img.Digest)) + if err != nil { + return nil, err } - var removedRepositoryRef bool if !isImageIDPrefix(imgID, imageRef) { - var deletedRefs []reference.Named + var deletedRefs []string // A repository reference was given and should be removed // first. We can only remove this reference if either force is // true, there are multiple repository references to this // image, or there are no containers using the given reference. 
- if !force && isSingleReference(repoRefs) { + if !force && isSingleReference(ctx, imgs) { if container := i.containers.First(using); container != nil { // If we removed the repository reference then // this image would remain "dangling" and since @@ -99,80 +106,217 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, } } + // TODO(containerd): normalize ref then use containerd reference parsing parsedRef, err := reference.ParseNormalizedNamed(imageRef) if err != nil { return nil, err } + imageRef = parsedRef.String() + locator := parsedRef.Name() - deletedRefs = append(deletedRefs, parsedRef) - i.LogImageEvent(ctx, imgID, imgID, "untag") + deletedRefs = append(deletedRefs, imageRef) // If a tag reference was removed and the only remaining // references to the same repository are digest references, // then clean up those digest references. - if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + if !isCanonicalReference(imageRef) { foundRepoTagRef := false - for _, repoRef := range repoRefs { - if parsedRef.String() == repoRef.String() { + canonicalRefs := []string{} + for _, img := range imgs { + if imageRef == img.Name { + continue + } + + spec, err := creference.Parse(img.Name) + if err != nil { + log.G(ctx).WithError(err).WithField("name", img.Name).Warnf("ignoring bad name") continue } - if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - foundRepoTagRef = true - break + if locator == spec.Locator { + if !isCanonicalReference(img.Name) { + foundRepoTagRef = true + break + } + canonicalRefs = append(canonicalRefs, img.Name) } + } if !foundRepoTagRef { // Remove canonical references from same repository - for _, repoRef := range repoRefs { - if parsedRef.String() == repoRef.String() { - continue - } - if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - // TODO(containerd): can 
repoRef be name only here? - deletedRefs = append(deletedRefs, repoRef) - } - } + deletedRefs = append(deletedRefs, canonicalRefs...) } } - // If it has remaining references then the untag finished the remove - if len(repoRefs)-len(deletedRefs) > 0 { - // Remove all references in containerd - // Do not wait for containerd's garbage collection - records, err := i.removeImageRefs(ctx, img, deletedRefs, false) - if err != nil { - return nil, errors.Wrap(err, "failed to delete refs") + // If it has remaining references then the untag finishes the remove + // and there is no need to check for parent reference removal + if len(imgs)-len(deletedRefs) > 0 { + records := []types.ImageDeleteResponseItem{} + for _, ref := range deletedRefs { + if err := is.Delete(ctx, ref); err != nil && !errdefs.IsNotFound(err) { + return records, errors.Wrapf(err, "failed to delete ref: %s", ref) + } + pref, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return records, errors.Wrapf(err, "failed to parse ref: %s", ref) + } + fref := reference.FamiliarString(pref) + i.LogImageEvent(ctx, imgID, fref, "untag") + records = append(records, types.ImageDeleteResponseItem{Untagged: fref}) } return records, nil } - removedRepositoryRef = true - } else if isSingleReference(repoRefs) { + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := i.checkImageDeleteConflict(img, c, false); conflict != nil { + log.G(ctx).Debugf("%s: ignoring conflict: %#v", img.Digest, conflict) + // TODO(containerd): Keep one reference to prevent deletion? + } + + } else { // If an ID reference was given AND there is at most one tag // reference to the image AND all references are within one // repository, then remove all references. 
c := conflictHard - deletedRefs := 0 - if force { - // Treated all references as deleted - deletedRefs = len(repoRefs) - } else { + active := false + if !force { // If not forced, fail on soft conflicts c |= conflictSoft + + active = !isSingleReference(ctx, imgs) } - if conflict := i.checkImageDeleteConflict(img, c, deletedRefs); conflict != nil { + + if conflict := i.checkImageDeleteConflict(img, c, active); conflict != nil { return nil, conflict } + } + + cs := i.client.ContentStore() + + layers := map[string][]digest.Digest{} + seenParents := map[string]struct{}{} + var wh images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } - i.LogImageEvent(ctx, imgID, imgID, "untag") + var parents []ocispec.Descriptor + for k, v := range info.Labels { + if k == LabelImageParent { + // Since parent relationship are client defined by labels rather + // than a hash tree, ensure parents do not mistakenly loop + if _, ok := seenParents[v]; !ok { + log.G(ctx).WithField("image", imgID).WithField("config", desc.Digest.String()).Debugf("deleted image has parent: %s", v) + parents = append(parents, ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Config, + Digest: digest.Digest(v), + // Size can be safely ignored here + }) + seenParents[v] = struct{}{} + } + } else if strings.HasPrefix(k, LabelLayerPrefix) { + driver := k[len(LabelLayerPrefix):] + layers[driver] = append(layers[driver], digest.Digest(v)) + } + } + return parents, nil + } + return nil, nil } - // TODO(containerd): Lock, perform deletion, - // check if image exists then delete layers - records, err := i.imageDeleteHelper(ctx, img, repoRefs, force, prune, removedRepositoryRef) - if err != nil { - return nil, err + if err := images.Walk(ctx, 
images.Handlers(images.ChildrenHandler(cs), wh), img); err != nil { + if !cerrdefs.IsNotFound(err) { + return nil, err + } + log.G(ctx).WithError(err).Warnf("missing object, some layer removals may be excluded") + } + + // Remove all references + records := []types.ImageDeleteResponseItem{} + for j, img := range imgs { + var opts []images.DeleteOpt + if j == len(imgs)-1 { + opts = append(opts, images.SynchronousDelete()) + } + if err := is.Delete(ctx, img.Name, opts...); err != nil && !errdefs.IsNotFound(err) { + return records, errors.Wrapf(err, "failed to delete ref: %s", img.Name) + } + pref, err := reference.ParseNormalizedNamed(img.Name) + if err != nil { + return records, errors.Wrapf(err, "failed to parse ref: %s", img.Name) + } + fref := reference.FamiliarString(pref) + i.LogImageEvent(ctx, imgID, fref, "untag") + records = append(records, types.ImageDeleteResponseItem{Untagged: fref}) + } + + // Lookup image to see if it was deleted + if _, err := cs.Info(ctx, img.Digest); err != nil { + if !cerrdefs.IsNotFound(err) { + return records, errors.Wrap(err, "failed to lookup image in content store") + } + records = append(records, types.ImageDeleteResponseItem{Deleted: imgID}) + + if len(layers) > 0 { + c, err := i.getCache(ctx) + if err != nil { + log.G(ctx).WithError(err).Errorf("unable to get cache, skipping layer removal") + } + + c.m.Lock() + for name, chainIDs := range layers { + ls, ok := i.layerStores[name] + if !ok { + log.G(ctx).WithField("driver", name).Warnf("layer store not configured for referenced layers, skipping removal") + continue + } + retained := c.layers[name] + + key := LabelLayerPrefix + name + var filters []string + unmarked := map[digest.Digest]struct{}{} + for _, chainID := range chainIDs { + filters = append(filters, fmt.Sprintf("labels.%q==%s", key, chainID)) + unmarked[chainID] = struct{}{} + } + + // Mark referenced layers by removing from unmarked + err := cs.Walk(ctx, func(i content.Info) error { + v := i.Labels[key] + if v != 
"" { + log.G(ctx).WithField("key", key).Debugf("Still there after removal...") + delete(unmarked, digest.Digest(v)) + } + return nil + }, filters...) + if err != nil { + log.G(ctx).WithError(err).WithField("driver", name).Errorf("mark failed, skipping layer removal") + } + + for chainID := range unmarked { + l, ok := retained[chainID] + if ok { + metadata, err := ls.Release(l) + if err != nil { + log.G(ctx).WithError(err).WithField("driver", name).WithField("layer", chainID).Errorf("layer release failed") + } + for _, m := range metadata { + log.G(ctx).WithField("driver", name).WithField("layer", m.ChainID).Infof("layer removed") + } + delete(retained, chainID) + } else { + log.G(ctx).WithField("driver", name).WithField("id", chainID.String()).Warnf("referenced layer not retained") + } + } + } + c.m.Unlock() + } } imageActions.WithValues("delete").UpdateSince(start) @@ -180,29 +324,47 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, return records, nil } +func isCanonicalReference(ref string) bool { + // TODO(containerd): Use a regex + return strings.ContainsAny(ref, "@") +} + // isSingleReference returns true when all references are from one repository // and there is at most one tag. Returns false for empty input. 
-func isSingleReference(repoRefs []reference.Named) bool { - if len(repoRefs) <= 1 { - return len(repoRefs) == 1 +func isSingleReference(ctx context.Context, imgs []images.Image) bool { + if len(imgs) <= 1 { + return len(imgs) == 1 } - var singleRef reference.Named + var singleRef string canonicalRefs := map[string]struct{}{} - for _, repoRef := range repoRefs { - if _, isCanonical := repoRef.(reference.Canonical); isCanonical { - canonicalRefs[repoRef.Name()] = struct{}{} - } else if singleRef == nil { - singleRef = repoRef + for _, img := range imgs { + if isCanonicalReference(img.Name) { + ref, err := creference.Parse(img.Name) + if err != nil { + log.G(ctx).WithField("name", img.Name).Warnf("ignoring unparseable reference") + continue + } + canonicalRefs[ref.Locator] = struct{}{} + } else if singleRef == "" { + singleRef = img.Name } else { return false } } - if singleRef == nil { - // Just use first canonical ref - singleRef = repoRefs[0] + if len(canonicalRefs) != 1 { + return false + } + + if singleRef != "" { + ref, err := creference.Parse(singleRef) + if err == nil { + _, ok := canonicalRefs[ref.Locator] + return ok + } + log.G(ctx).WithField("name", singleRef).Warnf("ignoring unparseable reference") } - _, ok := canonicalRefs[singleRef.Name()] - return len(canonicalRefs) == 1 && ok + return true + } // isImageIDPrefix returns whether the given possiblePrefix is a prefix of the @@ -219,71 +381,10 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool { return false } -// removeImageRefs removes a set of image references -// if the sync flag is set then garbage collection is -// is completed before returning -func (i *ImageService) removeImageRefs(ctx context.Context, img *cachedImage, refs []reference.Named, sync bool) ([]types.ImageDeleteResponseItem, error) { - records := []types.ImageDeleteResponseItem{} - - is := i.client.ImageService() - - for i, ref := range refs { - opts := []images.DeleteOpt{} - if sync && i == len(refs)-1 { - opts = 
append(opts, images.SynchronousDelete()) - } - if err := is.Delete(ctx, ref.String(), opts...); err != nil && !errdefs.IsNotFound(err) { - return records, errors.Wrapf(err, "failed to delete ref: %s", ref.String()) - } - - // TODO(containerd): do this? - //i.LogImageEvent(imgID.String(), imgID.String(), "untag") - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(ref)} - records = append(records, untaggedRecord) - } - - // TODO(containerd): clear from cache, get cache from arguments - img.m.Lock() - - // Note: refs is always sorted in same order as img.references - // since it must be created from img.references loop - var l, j int - for _, ref := range refs { - s := ref.String() - for j < len(img.references) && img.references[j].String() < s { - img.references[l] = img.references[j] - l++ - j++ - } - if j >= len(img.references) { - break - } - if img.references[j].String() == s { - // don't add - j++ - } - } - for j < len(img.references) { - img.references[l] = img.references[j] - l++ - j++ - } - // Shorten original - if l < len(img.references) { - img.references = img.references[:l] - } - - img.m.Unlock() - - return records, nil -} - // ImageDeleteConflict holds a soft or hard conflict and an associated error. // Implements the error interface. type imageDeleteConflict struct { hard bool - used bool imgID image.ID message string } @@ -301,133 +402,31 @@ func (idc *imageDeleteConflict) Error() string { func (idc *imageDeleteConflict) Conflict() {} -// imageDeleteHelper attempts to delete the given image from this daemon. If -// the image has any hard delete conflicts (child images or running containers -// using the image) then it cannot be deleted. If the image has any soft delete -// conflicts (any tags/digests referencing the image or any stopped container -// using the image) then it can only be deleted if force is true. 
If the delete -// succeeds and prune is true, the parent images are also deleted if they do -// not have any soft or hard delete conflicts themselves. Any deleted images -// and untagged references are appended to the given records. If any error or -// conflict is encountered, it will be returned immediately without deleting -// the image. If quiet is true, any encountered conflicts will be ignored and -// the function will return nil immediately without deleting the image. -func (i *ImageService) imageDeleteHelper(ctx context.Context, img *cachedImage, repoRefs []reference.Named, force, prune, quiet bool) ([]types.ImageDeleteResponseItem, error) { - // TODO(containerd): lock deletion, make reference removal and checks transactional in the cache? - log.G(ctx).Debugf("%s: Delete image with all references: %v", img.config.Digest, repoRefs) - - // First, determine if this image has any conflicts. Ignore soft conflicts - // if force is true. - c := conflictHard - if !force { - c |= conflictSoft - } - if conflict := i.checkImageDeleteConflict(img, c, len(repoRefs)); conflict != nil { - if quiet && (!i.imageIsDangling(img) || conflict.used) { - // TODO:(containerd): Is this expecting a no-op in all cases, since now - // remove image refs happens afterwards - log.G(ctx).Debugf("%s: ignoring conflict: %#v", img.config.Digest, conflict) - - // Ignore conflicts UNLESS the image is "dangling" or not being used in - // which case we want the user to know. - return nil, nil - } - log.G(ctx).Debugf("%s: remove conflict %v", img.config.Digest, conflict) - - // There was a conflict and it's either a hard conflict OR we are not - // forcing deletion on soft conflicts. - return nil, conflict - } - log.G(ctx).Debugf("%s: removing references", img.config.Digest) - - // TODO(containerd): Get list of configs and ChainIDs - - // Delete all repository tag/digest references to this image. 
- records, err := i.removeImageRefs(ctx, img, repoRefs, true) - if err != nil { - return records, err - } - - i.LogImageEvent(ctx, img.config.Digest.String(), img.config.Digest.String(), "delete") - records = append(records, types.ImageDeleteResponseItem{Deleted: img.config.Digest.String()}) - - // TODO(containerd): lock cache - // TODO(containerd): get all cached layers for chain ids - // TODO(containerd): for each layer, check current containerd namespace for reference - - // TODO(containerd): Snapshot integration will obsolete this section, - // containerd's garbage collector can own the removal of the layer - if img.layer != nil { - // TODO(containerd): Use function to get layer store - removedLayers, err := i.layerStores[runtime.GOOS].Release(img.layer) - if err != nil { - return records, err - } - - for _, removedLayer := range removedLayers { - records = append(records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) - } - } - - var parent *cachedImage - if img.parent != "" { - // TODO(containerd): pass cache in - c, err := i.getCache(ctx) - if err != nil { - return records, err - } - parent = c.byID(img.parent) - } - - if !prune || parent == nil { - return records, nil - } - - // We need to prune the parent image. This means delete it if there are - // no tags/digests referencing it and there are no containers using it ( - // either running or stopped). - // Do not force prunings, but do so quietly (stopping on any encountered - // conflicts). - parentRecords, err := i.imageDeleteHelper(ctx, parent, nil, false, true, true) - return append(records, parentRecords...), nil -} - // checkImageDeleteConflict determines whether there are any conflicts // preventing deletion of the given image from this daemon. A hard conflict is // any image which has the given image as a parent or any running container // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. 
If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. -func (i *ImageService) checkImageDeleteConflict(img *cachedImage, mask conflictType, deletedRefs int) *imageDeleteConflict { - // Check if the image has any descendant images. - // TODO(containerd): No use of image store - if mask&conflictDependentChild != 0 && len(img.children) > 0 { - return &imageDeleteConflict{ - hard: true, - imgID: image.ID(img.config.Digest), - message: "image has dependent child images", - } - } - +func (i *ImageService) checkImageDeleteConflict(img ocispec.Descriptor, mask conflictType, active bool) *imageDeleteConflict { if mask&conflictRunningContainer != 0 { // Check if any running container is using the image. running := func(c *container.Container) bool { - return c.IsRunning() && digest.Digest(c.ImageID) == img.config.Digest + return c.IsRunning() && digest.Digest(c.ImageID) == img.Digest } if container := i.containers.First(running); container != nil { return &imageDeleteConflict{ - imgID: image.ID(img.config.Digest), + imgID: image.ID(img.Digest), hard: true, - used: true, message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), } } } // Check if any repository tags/digest reference this image. - if mask&conflictActiveReference != 0 && len(img.references) > deletedRefs { + if mask&conflictActiveReference != 0 && active { return &imageDeleteConflict{ - imgID: image.ID(img.config.Digest), + imgID: image.ID(img.Digest), message: "image is referenced in multiple repositories", } } @@ -435,12 +434,11 @@ func (i *ImageService) checkImageDeleteConflict(img *cachedImage, mask conflictT if mask&conflictStoppedContainer != 0 { // Check if any stopped containers reference this image. 
stopped := func(c *container.Container) bool { - return !c.IsRunning() && digest.Digest(c.ImageID) == img.config.Digest + return !c.IsRunning() && digest.Digest(c.ImageID) == img.Digest } if container := i.containers.First(stopped); container != nil { return &imageDeleteConflict{ - imgID: image.ID(img.config.Digest), - used: true, + imgID: image.ID(img.Digest), message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), } } @@ -448,12 +446,3 @@ func (i *ImageService) checkImageDeleteConflict(img *cachedImage, mask conflictT return nil } - -// imageIsDangling returns whether the given image is "dangling" which means -// that there are no repository references to the given image and it has no -// child images. -func (i *ImageService) imageIsDangling(img *cachedImage) bool { - // To find children, Docker keeps a cache of images along with parents, it - // can also keep a backpointer to parents in memory - return !(len(img.references) > 0 || len(img.children) > 0) -} diff --git a/daemon/images/image_delete_test.go b/daemon/images/image_delete_test.go index d91ff94a90b73..04b5bafa1f146 100644 --- a/daemon/images/image_delete_test.go +++ b/daemon/images/image_delete_test.go @@ -6,23 +6,25 @@ import ( "strings" "testing" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/docker/docker/container" digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { + type testImage struct { - names []string - image construct - // TODO(containerd): parent index + names []string + image construct + parent int expected []string deleted bool } - type testDelete struct { ref string id int // index of image to delete, if ref is empty @@ -45,7 +47,7 @@ func testDeleteImages(ctx 
context.Context, t *testing.T, is *ImageService) { images: []testImage{ { names: []string{"docker.io/library/img1:latest"}, - image: randomManifest(1), + image: randomManifest(2), deleted: true, }, { @@ -67,26 +69,62 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { }, { ref: "img3:latest", - untagged: []string{"img3:latest", "img3;latest@2"}, + untagged: []string{"img3:latest", "img3:latest@2"}, + }, + }, + }, + { + name: "RemoveParentFirst", + images: []testImage{ + { + names: []string{"docker.io/library/img1:latest"}, + image: randomManifest(2), + deleted: true, + }, + { + names: []string{"docker.io/library/img2:latest"}, + image: randomManifest(2), + parent: -1, + deleted: true, + }, + }, + deletes: []testDelete{ + { + ref: "img1:latest", + untagged: []string{"img1:latest", "img1:latest@0"}, + deleted: []int{0}, + }, + { + ref: "img2:latest", + untagged: []string{"img2:latest", "img2:latest@1"}, + deleted: []int{1}, }, }, }, } { - ctx, cleanup, err := is.client.WithLease(ctx) - if err != nil { - t.Fatal(err) - } + var created []string t.Run(tc.name, func(t *testing.T) { var imgs []ocispec.Descriptor + deleted := map[digest.Digest]bool{} expected := map[string]*ocispec.Descriptor{} cis := is.client.ImageService() - for i, imagec := range tc.images { + cs := is.client.ContentStore() + ctx, cleanup, err := is.client.WithLease(ctx) + if err != nil { + t.Fatal(err) + } + + // TODO(containerd): store there per platform (map to img?) 
+ var configs []ocispec.Descriptor + var chainIDs []digest.Digest + for _, imagec := range tc.images { var desc ocispec.Descriptor - if err := imagec.image(&desc)(ctx, is.client.ContentStore()); err != nil { - t.Fatal(err) + if err := imagec.image(&desc)(ctx, cs); err != nil { + t.Error(err) + break } for _, name := range imagec.names { @@ -110,7 +148,8 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { img.Name = img.Name + "@" + desc.Digest.String() _, err = cis.Create(ctx, img) if err != nil { - t.Fatal(err) + t.Error(err) + break } created = append(created, img.Name) expected[img.Name] = nil @@ -124,12 +163,45 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { expected[tag+"@"+desc.Digest.String()] = &desc } - // TODO(containerd): Unpack image and store layer + // TODO(containerd): Handle multiplatform cases (for each?) + m, err := images.Manifest(ctx, cs, desc, is.platforms) + if err != nil { + t.Fatal(err) + } + + if err := is.unpack(ctx, m.Config, m.Layers, nil, nil, nil); err != nil { + t.Fatal(err) + } - // TODO(containerd): Set parent + if imagec.parent < 0 { + parentImg := configs[len(configs)+imagec.parent] + + info := content.Info{ + Digest: m.Config.Digest, + Labels: map[string]string{ + LabelImageParent: parentImg.Digest.String(), + }, + } + info, err := cs.Update(ctx, info, "labels."+LabelImageParent) + if err != nil { + t.Fatal(err) + } + } + + diffIDs, err := images.RootFS(ctx, cs, m.Config) + if err != nil { + t.Fatal(err) + } + configs = append(configs, m.Config) + chainIDs = append(chainIDs, identity.ChainID(diffIDs)) imgs = append(imgs, desc) - t.Logf("Image %d: %s", i, desc.Digest.String()) + } + if err := cleanup(ctx); err != nil { + t.Fatal(err) + } + if t.Failed() { + t.FailNow() } is.containers = mockContainerStore{tc.containers} @@ -143,7 +215,7 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { t.Fatal(err) } if expected := len(del.deleted) + 
len(del.untagged); len(items) != expected { - t.Errorf("Wrong number of items: expected %d, actual %d", expected, len(items)) + t.Errorf("Wrong number of items: expected %d, actual %v", expected, items) } else { untags := map[string]struct{}{} for _, ut := range del.untagged { @@ -157,18 +229,21 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { if item.Deleted != "" { if _, ok := deletes[item.Deleted]; !ok { t.Errorf("Unexpected delete: %s", item.Deleted) + } else { + delete(deletes, item.Deleted) } } if item.Untagged != "" { if _, ok := untags[item.Untagged]; !ok { t.Errorf("Unexpected untag: %s", item.Untagged) + } else { + delete(untags, item.Untagged) } } } } } - cs := is.client.ContentStore() for _, img := range imgs { _, err := cs.Info(ctx, img.Digest) if err != nil { @@ -178,8 +253,11 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { if !deleted[img.Digest] { t.Errorf("Missing image %s", img.Digest) } + // Ensure layers are gone! 
} else if deleted[img.Digest] { t.Errorf("Expected image %s to be deleted", img.Digest) + } else { + // Ensure layers are there } } @@ -201,9 +279,6 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { } }) - if err := cleanup(ctx); err != nil { - t.Fatal(err) - } cis := is.client.ImageService() for i, name := range created { var opts []images.DeleteOpt diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 0e3d4ccd365f9..1e9e954633a72 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -339,7 +339,10 @@ func (i *ImageService) applyLayer(ctx context.Context, ls layer.Store, blob ocis rc := ioutil.NopCloser(content.NewReader(ra)) blobId := stringid.TruncateID(blob.Digest.String()) - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, rc), progressOutput, blob.Size, blobId, "Extracting") + reader := ioutils.NewCancelReadCloser(ctx, rc) + if progressOutput != nil { + reader = progress.NewProgressReader(reader, progressOutput, blob.Size, blobId, "Extracting") + } defer reader.Close() dc, err := compression.DecompressStream(reader) diff --git a/daemon/images/service_test.go b/daemon/images/service_test.go index ac7ca84d242d1..5d455f97823e7 100644 --- a/daemon/images/service_test.go +++ b/daemon/images/service_test.go @@ -25,8 +25,11 @@ import ( "github.com/containerd/containerd/services/server" srvconfig "github.com/containerd/containerd/services/server/config" "github.com/containerd/containerd/snapshots" + "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" "github.com/pkg/errors" + "github.com/sirupsen/logrus" _ "github.com/containerd/containerd/diff/walking/plugin" _ "github.com/containerd/containerd/gc/scheduler" @@ -44,6 +47,11 @@ var ( pluginLoad sync.Once ) +func init() { + logrus.SetLevel(logrus.DebugLevel) + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + func loadPlugins(ctx 
context.Context, config *srvconfig.Config) ([]*plugin.Registration, error) { var err error pluginLoad.Do(func() { From 409437d641fe100c06ec0b37128d8df2e820478d Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 18 Mar 2019 17:52:10 -0700 Subject: [PATCH 36/73] Fix pull lease cleanup to use new context Prevents lease getting left around after canceled context Signed-off-by: Derek McGowan --- daemon/images/image_pull.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 1e9e954633a72..612eab9740a4b 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -161,7 +161,11 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return err } - defer done(pctx) + defer func() { + if err := done(context.Background()); err != nil { + log.G(pctx).WithError(err).Errorf("failed to remove lease") + } + }() // TODO(containerd): Custom resolver // - Auth config From 9f5379d9f4daaba75165e66882178e153aa75417 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 18 Mar 2019 18:41:11 -0700 Subject: [PATCH 37/73] Add more rmi tests Add test for removing parent and child. Add checks for layer removal. 
Signed-off-by: Derek McGowan --- daemon/images/image_delete_test.go | 130 ++++++++++++++++++++++++----- 1 file changed, 107 insertions(+), 23 deletions(-) diff --git a/daemon/images/image_delete_test.go b/daemon/images/image_delete_test.go index 04b5bafa1f146..d390d1b5d6aa5 100644 --- a/daemon/images/image_delete_test.go +++ b/daemon/images/image_delete_test.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/docker/docker/container" + "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -18,12 +19,20 @@ import ( func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { type testImage struct { - names []string - image construct + names []string + image construct + + // Index of parent relative to current image, must be negative parent int + // Tags expected after deletion expected []string - deleted bool + + // Whether the image object is expected to exist after deletion + deleted bool + + // Whether layers are expected to exist after deletion + layers bool } type testDelete struct { ref string @@ -54,11 +63,13 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { names: []string{"docker.io/library/img2:latest"}, image: randomManifest(2), expected: []string{"docker.io/library/img2:latest"}, + layers: true, }, { names: []string{"docker.io/library/img3:latest", "docker.io/library/img4:latest"}, image: randomManifest(3), expected: []string{"docker.io/library/img4:latest"}, + layers: true, }, }, deletes: []testDelete{ @@ -101,13 +112,71 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { }, }, }, + { + name: "RemoveChild", + images: []testImage{ + { + names: []string{"docker.io/library/img1:latest"}, + image: randomManifest(2), + expected: []string{"docker.io/library/img1:latest"}, + layers: 
true, + }, + { + names: []string{"docker.io/library/img2:latest"}, + image: randomManifest(2), + parent: -1, + deleted: true, + }, + }, + deletes: []testDelete{ + { + ref: "img2:latest", + untagged: []string{"img2:latest", "img2:latest@1"}, + deleted: []int{1}, + }, + }, + }, + { + name: "RemoveParent", + images: []testImage{ + { + names: []string{"docker.io/library/img1:latest"}, + image: randomManifest(2), + deleted: true, + layers: true, + }, + { + names: []string{"docker.io/library/img2:latest"}, + expected: []string{"docker.io/library/img2:latest"}, + image: randomManifest(2), + parent: -1, + layers: true, + }, + }, + deletes: []testDelete{ + { + ref: "img1:latest", + untagged: []string{"img1:latest", "img1:latest@0"}, + deleted: []int{0}, + }, + }, + }, } { var created []string t.Run(tc.name, func(t *testing.T) { var imgs []ocispec.Descriptor + type finalState struct { + digest digest.Digest + deleted bool + + layersDeleted bool + // TODO(containerd): store by platform + config digest.Digest + diffIDs []digest.Digest + } + var states []finalState - deleted := map[digest.Digest]bool{} expected := map[string]*ocispec.Descriptor{} cis := is.client.ImageService() @@ -117,9 +186,6 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { t.Fatal(err) } - // TODO(containerd): store there per platform (map to img?) 
- var configs []ocispec.Descriptor - var chainIDs []digest.Digest for _, imagec := range tc.images { var desc ocispec.Descriptor if err := imagec.image(&desc)(ctx, cs); err != nil { @@ -155,9 +221,6 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { expected[img.Name] = nil } - if imagec.deleted { - deleted[desc.Digest] = true - } for _, tag := range imagec.expected { expected[tag] = &desc expected[tag+"@"+desc.Digest.String()] = &desc @@ -174,12 +237,12 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { } if imagec.parent < 0 { - parentImg := configs[len(configs)+imagec.parent] + parentImg := states[len(states)+imagec.parent] info := content.Info{ Digest: m.Config.Digest, Labels: map[string]string{ - LabelImageParent: parentImg.Digest.String(), + LabelImageParent: parentImg.config.String(), }, } info, err := cs.Update(ctx, info, "labels."+LabelImageParent) @@ -192,8 +255,14 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { if err != nil { t.Fatal(err) } - configs = append(configs, m.Config) - chainIDs = append(chainIDs, identity.ChainID(diffIDs)) + states = append(states, finalState{ + digest: desc.Digest, + deleted: imagec.deleted, + + layersDeleted: !imagec.layers, + config: m.Config.Digest, + diffIDs: diffIDs, + }) imgs = append(imgs, desc) } @@ -244,20 +313,35 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { } } - for _, img := range imgs { - _, err := cs.Info(ctx, img.Digest) + for _, state := range states { + _, err := cs.Info(ctx, state.digest) if err != nil { if !errdefs.IsNotFound(err) { t.Fatal(err) } - if !deleted[img.Digest] { - t.Errorf("Missing image %s", img.Digest) + if !state.deleted { + t.Errorf("Missing image %s", state.digest) + } + } else if state.deleted { + t.Errorf("Expected image %s to be deleted", state.digest) + } + if len(state.diffIDs) > 0 { + chainID := identity.ChainID(state.diffIDs) + ls := is.layerStores["vfs"] + 
l, err := ls.Get(layer.ChainID(chainID)) + if err != nil { + if err != layer.ErrLayerDoesNotExist { + t.Fatal(err) + } + if !state.layersDeleted { + t.Errorf("Missing image %s layer", state.digest) + } + } else { + layer.ReleaseAndLog(ls, l) + if state.layersDeleted { + t.Errorf("Expected image %s layers to be deleted", state.digest) + } } - // Ensure layers are gone! - } else if deleted[img.Digest] { - t.Errorf("Expected image %s to be deleted", img.Digest) - } else { - // Ensure layers are there } } From 2e4b735f19edba1d8a115496ad4e288bac18e2ca Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 19 Mar 2019 11:28:39 -0700 Subject: [PATCH 38/73] Add os platform matcher Update backends to match only on platform Signed-off-by: Derek McGowan --- daemon/daemon.go | 2 +- daemon/platform.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 daemon/platform.go diff --git a/daemon/daemon.go b/daemon/daemon.go index 9e1e62d2dddee..e1cbeab53a26f 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -965,7 +965,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.graphDrivers[driver.platform.OS] = ls.DriverName() backends = append(backends, images.LayerBackend{ Store: ls, - Platform: platforms.Any(driver.platform), + Platform: matchOS(driver.platform), }) } diff --git a/daemon/platform.go b/daemon/platform.go new file mode 100644 index 0000000000000..f747aed21e521 --- /dev/null +++ b/daemon/platform.go @@ -0,0 +1,16 @@ +package daemon + +import ( + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type osMatcher string + +func (os osMatcher) Match(p ocispec.Platform) bool { + return p.OS == string(os) +} + +func matchOS(p ocispec.Platform) platforms.Matcher { + return osMatcher(p.OS) +} From e57fc9ca49fee9e8ce6edafc9576adb5e5099c9e Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 19 Mar 2019 17:36:00 -0700 Subject: [PATCH 39/73] 
Update commit to migrate layers Signed-off-by: Derek McGowan --- daemon/images/image_commit.go | 318 +++++++++++++++++++++++++++------- daemon/images/image_delete.go | 29 ++-- daemon/images/image_pull.go | 29 +++- daemon/images/images.go | 1 + 4 files changed, 305 insertions(+), 72 deletions(-) diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 928b869aa366b..09df56828cf8c 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -9,12 +9,15 @@ import ( "strings" "time" + "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" + cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/dockerversion" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" @@ -26,11 +29,17 @@ import ( // CommitImage creates a new image from a commit config func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) (ocispec.Descriptor, error) { - cache, err := i.getCache(ctx) + ctx, done, err := i.client.WithLease(ctx) if err != nil { return ocispec.Descriptor{}, err } + defer func() { + if err := done(context.Background()); err != nil { + log.G(ctx).WithError(err).Errorf("failed to remove lease") + } + }() + var img struct { ocispec.Image @@ -50,15 +59,12 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) if c.ParentImageID == "" { img.RootFS.Type = "layers" } else { - cache.m.RLock() - pci, ok := cache.idCache[digest.Digest(c.ParentImageID)] - cache.m.RUnlock() - - if !ok { - return ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "parent not found") + parent := ocispec.Descriptor{ + MediaType: 
images.MediaTypeDockerSchema2Config, + Digest: digest.Digest(c.ParentImageID), } - b, err := content.ReadBlob(ctx, i.client.ContentStore(), pci.config) + b, err := content.ReadBlob(ctx, i.client.ContentStore(), parent) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "unable to read config") } @@ -68,36 +74,31 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) } } - // TODO(containerd): get from container metadata - layerStore, err := i.getLayerStoreByOS(c.ContainerOS) - if err != nil { - return ocispec.Descriptor{}, err - } - rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) + cl, err := i.commitLayer(ctx, identity.ChainID(img.RootFS.DiffIDs), c) if err != nil { return ocispec.Descriptor{}, err } defer func() { - if rwTar != nil { - rwTar.Close() + if cl.layer != nil { + layer.ReleaseAndLog(cl.store, cl.layer) } }() - // TODO(containerd): Tee compressed output to content store - // for generation of the manifest. 
- l, err := layerStore.Register(rwTar, layer.ChainID(identity.ChainID(img.RootFS.DiffIDs))) + // Get compressed layer descriptors, migrate is needed + layers, err := i.compressedLayers(ctx, img.RootFS.DiffIDs) if err != nil { return ocispec.Descriptor{}, err } + // Create and write config created := time.Now().UTC() - diffID := l.DiffID() img.Created = &created - isEmptyLayer := layer.IsEmpty(diffID) + isEmptyLayer := layer.IsEmpty(layer.DiffID(cl.uncompressed.Digest)) if !isEmptyLayer { - img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(diffID)) + img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, cl.uncompressed.Digest) + layers = append(layers, cl.compressed) } img.History = append(img.History, ocispec.History{ Author: c.Author, @@ -119,18 +120,20 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) config, err := json.Marshal(img) if err != nil { - layer.ReleaseAndLog(layerStore, l) return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal committed image") } desc := ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, + MediaType: images.MediaTypeDockerSchema2Config, Digest: digest.FromBytes(config), Size: int64(len(config)), } - labels := map[string]string{ - fmt.Sprintf("%s%s", LabelLayerPrefix, layerStore.DriverName()): l.ChainID().String(), + labels := map[string]string{} + + if cl.layer != nil { + key := fmt.Sprintf("%s%s", LabelLayerPrefix, cl.store.DriverName()) + labels[key] = cl.layer.ChainID().String() } if c.ParentImageID != "" { @@ -141,60 +144,257 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) ref := fmt.Sprintf("config-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(config), desc, opts...); err != nil { - layer.ReleaseAndLog(layerStore, l) return ocispec.Descriptor{}, errors.Wrap(err, "unable to store config") } + // Create and write manifest + m := struct { + 
SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: desc, + Layers: layers, + } + + mb, err := json.Marshal(m) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal committed image") + } + + desc = ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Manifest, + Digest: digest.FromBytes(mb), + Size: int64(len(mb)), + } + + labels = map[string]string{ + "containerd.io/gc.ref.content.config": m.Config.Digest.String(), + } + for i, l := range m.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.l%d", i)] = l.Digest.String() + } + + opts = []content.Opt{content.WithLabels(labels)} + ref = fmt.Sprintf("manifest-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(mb), desc, opts...); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "unable to store manifest") + } + // Create a dangling image _, err = i.client.ImageService().Create(ctx, images.Image{ - // TODO(containerd): Add a name component here - Name: desc.Digest.String(), + // TODO(containerd): Add a more meaningful name component? 
+ Name: "@" + desc.Digest.String(), Target: desc, CreatedAt: created, UpdatedAt: created, - Labels: map[string]string{ - // TODO(containerd): name can be used to determine this - LabelImageDangling: desc.Digest.String(), - }, }) if err != nil { - layer.ReleaseAndLog(layerStore, l) return ocispec.Descriptor{}, errors.Wrap(err, "unable to store image") } - cache.m.Lock() - layerKey := digest.Digest(l.ChainID()) - if _, ok := cache.layers[layerStore.DriverName()][layerKey]; !ok { - cache.layers[layerStore.DriverName()][layerKey] = l - } else { - // Image already retained, don't hold onto layer - defer layer.ReleaseAndLog(layerStore, l) + if cl.layer != nil { + cache, err := i.getCache(ctx) + if err != nil { + return ocispec.Descriptor{}, err + } + cache.m.Lock() + layerKey := digest.Digest(cl.layer.ChainID()) + if _, ok := cache.layers[cl.store.DriverName()][layerKey]; !ok { + cache.layers[cl.store.DriverName()][layerKey] = cl.layer + // Unset this to prevent defer from releasing + cl.layer = nil + } + cache.m.Unlock() + } + + return desc, nil +} + +type committedLayer struct { + uncompressed ocispec.Descriptor + compressed ocispec.Descriptor + layer layer.Layer + store layer.Store +} + +func (i *ImageService) commitLayer(ctx context.Context, parent digest.Digest, c backend.CommitConfig) (committedLayer, error) { + // TODO(containerd): get from container metadata + layerStore, err := i.getLayerStoreByOS(c.ContainerOS) + if err != nil { + return committedLayer{}, err + } + rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) + if err != nil { + return committedLayer{}, err } + defer rwTar.Close() + + cs := i.client.ContentStore() - // TODO(containerd): remove this, no longer used - if _, ok := cache.idCache[desc.Digest]; !ok { - ci := &cachedImage{ - config: desc, - parent: digest.Digest(c.ParentImageID), + // TODO(containerd): Handle unavailable error or use random id? 
+ w, err := cs.Writer(ctx, content.WithRef("container-commit-"+c.ContainerID)) + if err != nil { + return committedLayer{}, err + } + defer func() { + if err := w.Close(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to close writer") } - cache.idCache[desc.Digest] = ci + }() + if err := w.Truncate(0); err != nil { + return committedLayer{}, err + } - // TODO(containerd): Refer to manifest here - cache.tCache[desc.Digest] = ci + dc, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return committedLayer{}, err + } - if ci.parent != "" { - pci, ok := cache.idCache[ci.parent] - if ok { - pci.m.Lock() - pci.children = append(pci.children, desc.Digest) - pci.m.Unlock() - } + l, err := layerStore.Register(io.TeeReader(rwTar, dc), layer.ChainID(parent)) + if err != nil { + return committedLayer{}, err + } + dc.Close() + + diffID := digest.Digest(l.DiffID()) + cdgst := w.Digest() + info, err := w.Status() + if err != nil { + return committedLayer{}, err + + } + size := info.Offset + if size == 0 { + return committedLayer{}, errors.New("empty write for layer") + } + + labels := map[string]string{ + "containerd.io/uncompressed": diffID.String(), + } + + if err := w.Commit(ctx, size, cdgst, content.WithLabels(labels)); err != nil { + if !cerrdefs.IsAlreadyExists(err) { + return committedLayer{}, err } + } - cache.m.Unlock() + return committedLayer{ + uncompressed: ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Layer, + Digest: diffID, + Size: -1, + }, + compressed: ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2LayerGzip, + Digest: cdgst, + Size: size, + }, + layer: l, + store: layerStore, + }, nil +} + +func (i *ImageService) compressedLayers(ctx context.Context, diffs []digest.Digest) ([]ocispec.Descriptor, error) { + var filters []string + for _, diff := range diffs { + filters = append(filters, fmt.Sprintf("labels.\"containerd.io/uncompressed\"==%s", diff.String())) + } + descs := 
make([]ocispec.Descriptor, len(diffs)) + + i.client.ContentStore().Walk(ctx, func(info content.Info) error { + udgst := digest.Digest(info.Labels["containerd.io/uncompressed"]) + for i, diff := range diffs { + if diff == udgst { + descs[i] = ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2LayerGzip, + Digest: info.Digest, + Size: info.Size, + } + } + } + return nil + }, filters...) - return desc, nil + for j, diff := range diffs { + if descs[j].Digest != "" { + continue + } + log.G(ctx).WithField("diff", diff).Debugf("compressed blob not found, migrating") + + // Look in all configured layer stores + for _, store := range i.layerBackends { + l, err := store.Get(layer.ChainID(identity.ChainID(diffs[:j+1]))) + if err != nil { + if err == layer.ErrLayerDoesNotExist { + continue + } + return nil, errors.Wrapf(err, "cannot get layer for %s", diff.String()) + } + defer layer.ReleaseAndLog(store, l) + + cs := i.client.ContentStore() + + // TODO(containerd): Handle unavailable and synchronize + w, err := cs.Writer(ctx, content.WithRef("layer-migrate-"+diff.String())) + if err != nil { + return nil, err + } + // Ensure any leftover data is abandoned + if err := w.Truncate(0); err != nil { + return nil, err + } + + dc, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return nil, err + } + + rc, err := l.TarStream() + if err != nil { + return nil, err + } + + n, err := io.Copy(dc, rc) + rc.Close() + if err != nil { + return nil, err + } + + dc.Close() + + labels := map[string]string{ + "containerd.io/uncompressed": diff.String(), + } + + cdgst := w.Digest() + if err := w.Commit(ctx, n, cdgst, content.WithLabels(labels)); err != nil { + if !cerrdefs.IsAlreadyExists(err) { + return nil, err + } + if err := w.Close(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to close writer") + } + } + + descs[j] = ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2LayerGzip, + Digest: cdgst, + Size: n, + } + break + } + + if 
descs[j].Digest == "" { + return nil, errdefs.NotFound(errors.New("layer not found")) + } + } + + return descs, nil } func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.ReadCloser, err error) { diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 5212adeaec421..15602a89be64d 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -155,13 +155,16 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, if err := is.Delete(ctx, ref); err != nil && !errdefs.IsNotFound(err) { return records, errors.Wrapf(err, "failed to delete ref: %s", ref) } - pref, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return records, errors.Wrapf(err, "failed to parse ref: %s", ref) + var fref string + if !strings.HasPrefix(ref, "<") { + pref, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return records, errors.Wrapf(err, "failed to parse ref: %s", ref) + } + fref = reference.FamiliarString(pref) + i.LogImageEvent(ctx, imgID, fref, "untag") + records = append(records, types.ImageDeleteResponseItem{Untagged: fref}) } - fref := reference.FamiliarString(pref) - i.LogImageEvent(ctx, imgID, fref, "untag") - records = append(records, types.ImageDeleteResponseItem{Untagged: fref}) } return records, nil } @@ -247,13 +250,15 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, if err := is.Delete(ctx, img.Name, opts...); err != nil && !errdefs.IsNotFound(err) { return records, errors.Wrapf(err, "failed to delete ref: %s", img.Name) } - pref, err := reference.ParseNormalizedNamed(img.Name) - if err != nil { - return records, errors.Wrapf(err, "failed to parse ref: %s", img.Name) + if !strings.HasPrefix(img.Name, "<") { + pref, err := reference.ParseNormalizedNamed(img.Name) + if err != nil { + return records, errors.Wrapf(err, "failed to parse ref: %s", img.Name) + } + fref := reference.FamiliarString(pref) + i.LogImageEvent(ctx, 
imgID, fref, "untag") + records = append(records, types.ImageDeleteResponseItem{Untagged: fref}) } - fref := reference.FamiliarString(pref) - i.LogImageEvent(ctx, imgID, fref, "untag") - records = append(records, types.ImageDeleteResponseItem{Untagged: fref}) } // Lookup image to see if it was deleted diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 612eab9740a4b..3508987b27a12 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -360,7 +360,34 @@ func (i *ImageService) applyLayer(ctx context.Context, ls layer.Store, blob ocis parent = identity.ChainID(layers[:len(layers)-1]) } - return ls.Register(dc, layer.ChainID(parent)) + var r io.Reader + var dgstr digest.Digester + if dc.GetCompression() == compression.Gzip { + dgstr = digest.Canonical.Digester() + r = io.TeeReader(dc, dgstr.Hash()) + } else { + r = dc + } + + nl, err := ls.Register(r, layer.ChainID(parent)) + if err != nil { + return nil, err + } + + if dgstr != nil { + info := content.Info{ + Digest: blob.Digest, + Labels: map[string]string{ + "containerd.io/uncompressed": dgstr.Digest().String(), + }, + } + _, err := cs.Update(ctx, info, "labels.containerd.io/uncompressed") + if err != nil { + log.G(ctx).WithError(err).WithField("digest", blob.Digest.String()).Warnf("unable to set uncompressed label") + } + } + + return nl, nil } func getTagOrDigest(ref reference.Named) string { diff --git a/daemon/images/images.go b/daemon/images/images.go index be55f65636012..676bcee842790 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -283,6 +283,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al for _, img := range imgs { ref, err := reference.Parse(img.Name) if err != nil { + // TODO(containerd): Check for format such as @ continue } if named, ok := ref.(reference.Named); ok { From 189f0cb7893ed9708f08a50f072af6dbe5340f4a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 11:46:41 -0700 
Subject: [PATCH 40/73] Update history to not rely on deprecated cache Signed-off-by: Derek McGowan --- daemon/images/image_history.go | 92 ++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 33 deletions(-) diff --git a/daemon/images/image_history.go b/daemon/images/image_history.go index 48bb501602abe..e5cff8be6865b 100644 --- a/daemon/images/image_history.go +++ b/daemon/images/image_history.go @@ -4,11 +4,12 @@ import ( "context" "encoding/json" "fmt" - "runtime" + "strings" "time" "github.com/containerd/containerd/content" - "github.com/docker/distribution/reference" + cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" "github.com/docker/docker/api/types/image" "github.com/docker/docker/layer" "github.com/opencontainers/image-spec/identity" @@ -20,12 +21,18 @@ import ( // name by walking the image lineage. func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image.HistoryResponseItem, error) { start := time.Now() - ci, err := i.getCachedRef(ctx, name) + desc, err := i.ResolveImage(ctx, name) if err != nil { return nil, err } - p, err := content.ReadBlob(ctx, i.client.ContentStore(), ci.config) + cs := i.client.ContentStore() + m, err := images.Manifest(ctx, cs, desc, i.platforms) + if err != nil { + return nil, err + } + + p, err := content.ReadBlob(ctx, cs, m.Config) if err != nil { return nil, errors.Wrap(err, "failed to read config") } @@ -41,6 +48,22 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image. 
rootFS := img.RootFS rootFS.DiffIDs = nil + info, err := cs.Info(ctx, m.Config.Digest) + if err != nil { + return nil, errors.Wrapf(err, "unable to get config %s", m.Config.Digest.String()) + } + var ls layer.Store + for k := range info.Labels { + if strings.HasPrefix(k, LabelLayerPrefix) { + name := k[len(LabelLayerPrefix):] + ils, ok := i.layerStores[name] + if ok { + ls = ils + break + } + } + } + for _, h := range img.History { var layerSize int64 @@ -49,17 +72,21 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image. return nil, fmt.Errorf("too many non-empty layers in History section") } rootFS.DiffIDs = append(rootFS.DiffIDs, img.RootFS.DiffIDs[layerCounter]) - l, err := i.layerStores[runtime.GOOS].Get(layer.ChainID(identity.ChainID(rootFS.DiffIDs))) - if err != nil { - return nil, err - } - layerSize, err = l.DiffSize() - layer.ReleaseAndLog(i.layerStores[runtime.GOOS], l) - if err != nil { - return nil, err - } layerCounter++ + + if ls != nil { + l, err := ls.Get(layer.ChainID(identity.ChainID(rootFS.DiffIDs))) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(ls, l) + if err != nil { + return nil, err + } + } + } history = append([]*image.HistoryResponseItem{{ @@ -71,33 +98,32 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image. }}, history...) 
} - c, err := i.getCache(ctx) - if err != nil { - return nil, err - } - - // Fill in image IDs and tags - histImg := ci - id := ci.config.Digest + //// Fill in image IDs + id := desc.Digest for _, h := range history { + // TODO(containerd): is it ok that parent IDs may not match images h.ID = id.String() - var tags []string - for _, r := range histImg.references { - if _, ok := r.(reference.NamedTagged); ok { - tags = append(tags, reference.FamiliarString(r)) - } - } + // TODO(containerd): fill in tags or just ignore + // var tags []string + // for _, r := range histImg.references { + // if _, ok := r.(reference.NamedTagged); ok { + // tags = append(tags, reference.FamiliarString(r)) + // } + // } - h.Tags = tags + // h.Tags = tags - id = histImg.parent - if id == "" { + parent := info.Labels[LabelImageParent] + if parent == "" { break } - histImg = c.byID(id) - if histImg == nil { - break + info, err = cs.Info(ctx, m.Config.Digest) + if err != nil { + if cerrdefs.IsNotFound(err) { + break + } + return nil, errors.Wrapf(err, "unable to get parent config %s", parent) } } imageActions.WithValues("history").UpdateSince(start) From ac32edad5794bc4fe1b4a03ee80a95ecc3746212 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 11:48:41 -0700 Subject: [PATCH 41/73] Show digest tags from dangling images Signed-off-by: Derek McGowan --- daemon/images/images.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/daemon/images/images.go b/daemon/images/images.go index 676bcee842790..c8bc5efdcb107 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -283,6 +283,11 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al for _, img := range imgs { ref, err := reference.Parse(img.Name) if err != nil { + if strings.HasPrefix(img.Name, "<") { + if idx := strings.Index(img.Name, ">@"); idx > 0 { + digests["none"+img.Name[idx+1:]] = struct{}{} + } + } // TODO(containerd): Check for format such as @ continue } From 
d4036ec3a011899dbe15ade8adea0a479c50d267 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 13:51:42 -0700 Subject: [PATCH 42/73] Add size to docker images Signed-off-by: Derek McGowan --- daemon/images/images.go | 43 +++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/daemon/images/images.go b/daemon/images/images.go index c8bc5efdcb107..b76f6267516ac 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -10,7 +10,6 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -240,11 +239,8 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: config = target default: - // TODO(containerd): use global platforms matcher - platform := platforms.Default() - // TODO(containerd): config matcher which ignores NotFound items? 
- desc, err := images.Config(ctx, cs, imgs[0].Target, platform) + desc, err := images.Config(ctx, cs, imgs[0].Target, i.platforms) if err != nil { log.G(ctx).WithError(err).WithField("image", dgst.String()).Warnf("unable to resolve config") continue @@ -254,20 +250,39 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al // TODO(containerd): Stat config if info, err := cs.Info(ctx, config.Digest); err == nil { + var sizeSet bool + var size int64 for label, value := range info.Labels { if label == LabelImageParent { newImage.ParentID = value - } else if strings.HasPrefix(label, LabelLayerPrefix) { - // TODO: Lookup from layer store + } else if !sizeSet && strings.HasPrefix(label, LabelLayerPrefix) { + name := label[len(LabelLayerPrefix):] + ls, ok := i.layerStores[name] + if ok { + l, err := ls.Get(layer.ChainID(value)) + if err != nil { + log.G(ctx).WithError(err).WithField("driver", name).WithField("layer", name).Warnf("unable to get layer") + continue + } + size, err = l.Size() + layer.ReleaseAndLog(ls, l) + if err != nil { + log.G(ctx).WithError(err).WithField("driver", name).WithField("layer", name).Warnf("unable to get layer size") + continue + } + + break + } } - // TODO(containerd): Store size in label + // TODO(containerd): Allow size in label? 
} - // TODO(containerd): Resolve config for current platform - // TODO(containerd): Fill this in from config and content labels - //newImage.Size = size - //newImage.VirtualSize = size - //newImage.SharedSize = -1 - //newImage.Containers = -1 + + newImage.Size = size + newImage.VirtualSize = size + newImage.SharedSize = -1 + newImage.Containers = -1 + + // TODO(containerd): read config and set labels //if image.Config != nil { // newImage.Labels = image.Config.Labels //} From 479c9a378cf6ea5270df65190b9bb1849c773f3b Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 15:25:18 -0700 Subject: [PATCH 43/73] Improve platform matching for containers and pull By default match a single architecture platform for pull. Use container platform for resolving layerstore. Use container driver name when available to resolve layerstore. Signed-off-by: Derek McGowan --- daemon/container.go | 4 ++-- daemon/daemon.go | 6 ++--- daemon/delete.go | 2 +- daemon/export.go | 4 ++-- daemon/images/image_commit.go | 9 +++++-- daemon/images/service.go | 44 ++++++++++++++--------------------- daemon/platform.go | 16 ------------- 7 files changed, 32 insertions(+), 53 deletions(-) delete mode 100644 daemon/platform.go diff --git a/daemon/container.go b/daemon/container.go index 87cec59d03abf..d2e1715e2dc42 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -163,8 +163,8 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, h base.ImageID = image.ID(img.Config.Digest) base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name - // TODO(containerd): Rename this function or pass it in, get it after layer created? 
- base.Driver = daemon.imageService.GraphDriverForOS(img.Platform.OS) + // TODO(containerd): set this after layer created based on the graph driver + base.Driver = daemon.imageService.DriverName(img.Platform) base.OS = img.Platform.OS // TODO(containerd): Set architecture // TODO(containerd): Set variant diff --git a/daemon/daemon.go b/daemon/daemon.go index e1cbeab53a26f..7db59c8fce933 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -965,7 +965,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.graphDrivers[driver.platform.OS] = ls.DriverName() backends = append(backends, images.LayerBackend{ Store: ls, - Platform: matchOS(driver.platform), + Platform: platforms.Only(driver.platform), }) } @@ -1262,8 +1262,8 @@ func (daemon *Daemon) Mount(container *container.Container) error { // on non-Windows operating systems. if runtime.GOOS != "windows" { daemon.Unmount(container) - return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir) + return fmt.Errorf("driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + container.Driver, container.ID, container.BaseFS, dir) } } container.BaseFS = dir // TODO: combine these fields diff --git a/daemon/delete.go b/daemon/delete.go index 482e7d1270d37..d3843ec3306d4 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -118,7 +118,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo // When container creation fails and `RWLayer` has not been created yet, we // do not call `ReleaseRWLayer` if container.RWLayer != nil { - err := daemon.imageService.ReleaseLayer(container.RWLayer, container.OS) + err := daemon.imageService.ReleaseLayer(container.RWLayer, container.Driver) if err != nil { err = errors.Wrapf(err, "container %s", container.ID) container.SetRemovalError(err) diff --git a/daemon/export.go 
b/daemon/export.go index 01593f4e8a4f4..2ec189d10b2e5 100644 --- a/daemon/export.go +++ b/daemon/export.go @@ -51,13 +51,13 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R if !system.IsOSSupported(container.OS) { return nil, fmt.Errorf("cannot export %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem) } - rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) + rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.Driver) if err != nil { return nil, err } defer func() { if err != nil { - daemon.imageService.ReleaseLayer(rwlayer, container.OS) + daemon.imageService.ReleaseLayer(rwlayer, container.Driver) } }() diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 09df56828cf8c..9c71ae2bf757b 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -14,6 +14,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/dockerversion" @@ -222,8 +223,12 @@ type committedLayer struct { } func (i *ImageService) commitLayer(ctx context.Context, parent digest.Digest, c backend.CommitConfig) (committedLayer, error) { - // TODO(containerd): get from container metadata - layerStore, err := i.getLayerStoreByOS(c.ContainerOS) + // TODO(containerd): get driver name from container metadata + p := platforms.DefaultSpec() + p.OS = c.ContainerOS + p.OSVersion = "" + p.OSFeatures = nil + layerStore, err := i.getLayerStore(p) if err != nil { return committedLayer{}, err } diff --git a/daemon/images/service.go b/daemon/images/service.go index 1af0dde7f4c94..cdce223d5c98f 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -227,16 +227,12 @@ func (i *ImageService) 
getLayerStore(platform ocispec.Platform) (layer.Store, er return nil, errdefs.Unavailable(errors.Errorf("no layer storage backend configured for %s", platform.OS)) } -func (i *ImageService) getLayerStoreByOS(os string) (layer.Store, error) { - return i.getLayerStore(ocispec.Platform{OS: os}) -} - // GetLayerByID returns a layer by ID and operating system // called from daemon.go Daemon.restore(), and Daemon.containerExport() -func (i *ImageService) GetLayerByID(cid string, os string) (layer.RWLayer, error) { - ls, err := i.getLayerStoreByOS(os) - if err != nil { - return nil, err +func (i *ImageService) GetLayerByID(cid string, driver string) (layer.RWLayer, error) { + ls, ok := i.layerStores[driver] + if !ok { + return nil, errdefs.NotFound(errors.Errorf("driver not found: %s", driver)) } return ls.GetRWLayer(cid) @@ -256,10 +252,10 @@ func (i *ImageService) LayerStoreStatus() map[string][][2]string { // called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup) // TODO: needs to be refactored to Unmount (see callers), or removed and replaced // with GetLayerByID -func (i *ImageService) GetLayerMountID(cid string, os string) (string, error) { - ls, err := i.getLayerStoreByOS(os) - if err != nil { - return "", err +func (i *ImageService) GetLayerMountID(cid string, driver string) (string, error) { + ls, ok := i.layerStores[driver] + if !ok { + return "", errdefs.NotFound(errors.Errorf("driver not found: %s", driver)) } return ls.GetMountID(cid) @@ -275,32 +271,26 @@ func (i *ImageService) Cleanup() { } } -// GraphDriverForOS returns the name of the graph drvier -// moved from Daemon.GraphDriverName, used by: -// - newContainer -// - to report an error in Daemon.Mount(container) -func (i *ImageService) GraphDriverForOS(os string) string { - ls, err := i.getLayerStoreByOS(os) +// GraphDriverForOS returns the name of the graph driver for the given platform +func (i *ImageService) DriverName(p ocispec.Platform) string { + ls, 
err := i.getLayerStore(p) if err != nil { - // TODO(containerd): more graceful return is possible - panic(err) + return "" } return ls.DriverName() } // ReleaseLayer releases a layer allowing it to be removed -// called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() -func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, containerOS string) error { - ls, err := i.getLayerStoreByOS(containerOS) - if err != nil { - return err +func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, driver string) error { + ls, ok := i.layerStores[driver] + if !ok { + return errdefs.NotFound(errors.Errorf("driver not found: %s", driver)) } metadata, err := ls.ReleaseRWLayer(rwlayer) layer.LogReleaseMetadata(metadata) if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) { - return errors.Wrapf(err, "driver %q failed to remove root filesystem", - i.layerStores[containerOS].DriverName()) + return errors.Wrapf(err, "driver %q failed to remove root filesystem", ls.DriverName()) } return nil } diff --git a/daemon/platform.go b/daemon/platform.go deleted file mode 100644 index f747aed21e521..0000000000000 --- a/daemon/platform.go +++ /dev/null @@ -1,16 +0,0 @@ -package daemon - -import ( - "github.com/containerd/containerd/platforms" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type osMatcher string - -func (os osMatcher) Match(p ocispec.Platform) bool { - return p.OS == string(os) -} - -func matchOS(p ocispec.Platform) platforms.Matcher { - return osMatcher(p.OS) -} From f81d226e2a42fa428e5856005a0b624ce2302391 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 16:06:28 -0700 Subject: [PATCH 44/73] Remove deprecated cache Use layer cache when necessary or containerd client directly Signed-off-by: Derek McGowan --- daemon/images/cache.go | 208 +-------------------------------- daemon/images/image.go | 155 ------------------------ daemon/images/image_events.go | 28 +---- 
daemon/images/image_inspect.go | 2 +- daemon/images/service.go | 69 +++-------- daemon/list.go | 4 +- 6 files changed, 30 insertions(+), 436 deletions(-) diff --git a/daemon/images/cache.go b/daemon/images/cache.go index f12435f6c78a6..03347afc450c0 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -6,67 +6,18 @@ import ( "sync" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/digestset" - "github.com/docker/distribution/reference" "github.com/docker/docker/builder" buildcache "github.com/docker/docker/image/cache" "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) -type cachedImage struct { - config ocispec.Descriptor - parent digest.Digest - - // Mutable values - m sync.Mutex - references []reference.Named - children []digest.Digest - - // Layer held by Docker, this should get removed when - // moved to containerd snapshotters. The garbage - // collection in containerd is reasonable for cleanup. 
- layer layer.Layer -} - type cache struct { - m sync.RWMutex - ids *digestset.Set - descriptors map[digest.Digest]ocispec.Descriptor - layers map[string]map[digest.Digest]layer.Layer - - // idCache maps Docker identifiers - // deprecated - idCache map[digest.Digest]*cachedImage - // tCache maps target digests to images - // deprecated - tCache map[digest.Digest]*cachedImage -} - -func (c *cache) byID(id digest.Digest) *cachedImage { - c.m.RLock() - img, ok := c.idCache[id] - c.m.RUnlock() - if !ok { - return nil - } - return img -} - -func (c *cache) byTarget(target digest.Digest) *cachedImage { - c.m.RLock() - img, ok := c.tCache[target] - c.m.RUnlock() - if !ok { - return nil - } - return img + m sync.RWMutex + layers map[string]map[digest.Digest]layer.Layer } // LoadCache loads the image cache by scanning containerd images @@ -90,15 +41,8 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach var ( cs = i.client.ContentStore() - is = i.client.ImageService() c = &cache{ - descriptors: map[digest.Digest]ocispec.Descriptor{}, - layers: map[string]map[digest.Digest]layer.Layer{}, - - // Deprecated - ids: digestset.NewSet(), - idCache: map[digest.Digest]*cachedImage{}, - tCache: map[digest.Digest]*cachedImage{}, + layers: map[string]map[digest.Digest]layer.Layer{}, } ) @@ -128,157 +72,11 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach c.layers[name] = backendCache } - // TODO(containerd): This must use some streaming approach - imgs, err := is.List(ctx) - if err != nil { - return nil, err - } - - for _, img := range imgs { - var ( - named reference.Named - id ocispec.Descriptor - ) - - if danglingID, ok := img.Labels[LabelImageDangling]; !ok { - ref, err := reference.Parse(img.Name) - if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image name") - continue - } - var ok bool - named, ok = ref.(reference.Named) - if !ok { - log.G(ctx).WithField("name", 
img.Name).Debug("skipping invalid image name with no name component") - continue - } - } else { - dgst, err := digest.Parse(danglingID) - if err != nil { - log.G(ctx).WithError(err).WithField("id", danglingID).Debug("skipping invalid image id label (dangling)") - continue - } - id = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: dgst, - } - } - - ci := c.tCache[img.Target.Digest] - if ci == nil { - if img.Target.MediaType == images.MediaTypeDockerSchema2Config || img.Target.MediaType == ocispec.MediaTypeImageConfig { - id = img.Target - } - if id.Digest == "" { - idstr, ok := img.Labels[LabelImageID] - if !ok { - cs := i.client.ContentStore() - // TODO(containerd): resolve architecture from context - // TODO(containerd): support multi-platform images - platform := platforms.Default() - desc, err := images.Config(ctx, cs, img.Target, platform) - if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("unable to resolve image config for platform") - continue - } - id = desc - } else { - dgst, err := digest.Parse(idstr) - if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid image id label") - continue - } - id = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: dgst, - } - } - } - - ci = c.idCache[id.Digest] - if ci == nil { - ci = &cachedImage{ - config: id, - } - if s := img.Labels[LabelImageParent]; s != "" { - pid, err := digest.Parse(s) - if err != nil { - log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping invalid parent label") - } else { - ci.parent = pid - } - } - //diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), ci.config) - //if err != nil { - // log.G(ctx).WithError(err).WithField("name", img.Name).Debug("unable to load image rootfs") - // continue - //} - - //// TODO(containerd): choose correct platform - //ci.layer, err = i.backends[0].Get(layer.ChainID(identity.ChainID(diffIDs))) - //if err != nil { - // 
log.G(ctx).WithError(err).WithField("name", img.Name).Debug("no layer for image") - // continue - //} - - c.idCache[id.Digest] = ci - c.ids.Add(id.Digest) - } - c.tCache[img.Target.Digest] = ci - c.descriptors[img.Target.Digest] = img.Target - - // Load image layer to prevent removal - } - if named != nil { - ci.addReference(named) - } - } i.cache[namespace] = c return c, nil } -func (ci *cachedImage) addReference(named reference.Named) { - var ( - i int - s = named.String() - ) - - // Add references, add in sorted place - for ; i < len(ci.references); i++ { - if rs := ci.references[i].String(); s < rs { - ci.references = append(ci.references, nil) - copy(ci.references[i+1:], ci.references[i:]) - ci.references[i] = named - break - } else if rs == s { - break - } - } - if i == len(ci.references) { - ci.references = append(ci.references, named) - } -} - -func (ci *cachedImage) addChild(d digest.Digest) { - var i int - - // Add references, add in sorted place - for ; i < len(ci.children); i++ { - if d < ci.children[i] { - ci.children = append(ci.children, "") - copy(ci.children[i+1:], ci.children[i:]) - ci.children[i] = d - break - } else if ci.children[i] == d { - break - } - } - if i == len(ci.children) { - ci.children = append(ci.children, d) - } -} - func (i *ImageService) getCache(ctx context.Context) (c *cache, err error) { namespace, ok := namespaces.Namespace(ctx) if !ok { diff --git a/daemon/images/image.go b/daemon/images/image.go index c94f41acd6a43..bbab1dc1a835d 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -20,21 +20,10 @@ import ( ) const ( - // LabelImageID refers to the image ID used by Docker - // Deprecate this to support multi-arch images - LabelImageID = "docker.io/image.id" - // LabelImageParent is Docker's parent image ID // Stored on the image blob (config or manifest) LabelImageParent = "containerd.io/gc.ref.content.parent" - // LabelImageDangling refers to images with no name - // Stored on images and points to the image 
config digest - // TODO(containerd): Deprecate this, use name@hash approach - // to hold onto images and avoid calculating the dangling - // property after every retag - LabelImageDangling = "docker.io/image.dangling" - // LabelLayerPrefix is used as the label prefix for layer stores // Stores the layer reference in the given layerstore. // The value always represents the digest of the ChainID @@ -57,15 +46,6 @@ func (e ErrImageDoesNotExist) Error() string { // NotFound implements the NotFound interface func (e ErrImageDoesNotExist) NotFound() {} -func (i *ImageService) GetImage(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { - ci, err := i.getCachedRef(ctx, refOrID) - if err != nil { - return ocispec.Descriptor{}, err - } - - return ci.config, nil -} - // SearchImage searches for an image based on the given // reference or identifier. Returns the descriptor of // the image, could be manifest list, manifest, or config. @@ -77,14 +57,6 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe is := i.client.ImageService() - c, err := i.getCache(ctx) - if err != nil { - return ocispec.Descriptor{}, err - } - - c.m.RLock() - defer c.m.RUnlock() - namedRef, ok := parsed.(reference.Named) if !ok { digested, ok := parsed.(reference.Digested) @@ -92,12 +64,6 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe return ocispec.Descriptor{}, errdefs.InvalidParameter(errors.New("bad reference")) } - // Check if descriptor is cached - desc, ok := c.descriptors[digested.Digest()] - if ok { - return desc, nil - } - imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to lookup digest") @@ -393,124 +359,3 @@ func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) Image: &img, }, nil } - -func (i *ImageService) getReferences(ctx context.Context, imageID digest.Digest) ([]reference.Named, 
error) { - c, err := i.getCache(ctx) - if err != nil { - return nil, err - } - img := c.byID(imageID) - if img == nil { - return nil, errdefs.NotFound(errors.New("no image with given id")) - } - - return img.references, nil -} - -func (i *ImageService) getCachedRef(ctx context.Context, ref string) (*cachedImage, error) { - img, err := i.getImageByRef(ctx, ref) - if err != nil { - return nil, err - } - return img.cached, nil -} - -type imageLink struct { - name reference.Named - target *ocispec.Descriptor - cached *cachedImage -} - -func (i *ImageService) getImageByRef(ctx context.Context, ref string) (imageLink, error) { - parsed, err := reference.ParseAnyReference(ref) - if err != nil { - return imageLink{}, err - } - - c, err := i.getCache(ctx) - if err != nil { - return imageLink{}, err - } - - c.m.RLock() - defer c.m.RUnlock() - - namedRef, ok := parsed.(reference.Named) - if !ok { - digested, ok := parsed.(reference.Digested) - if !ok { - return imageLink{}, errdefs.InvalidParameter(errors.New("bad reference")) - } - - ci, ok := c.idCache[digested.Digest()] - if !ok { - return imageLink{}, errdefs.NotFound(errors.New("id not found")) - } - return imageLink{ - cached: ci, - }, nil - } - - img, err := i.client.ImageService().Get(ctx, namedRef.String()) - if err != nil { - if !cerrdefs.IsNotFound(err) { - return imageLink{}, err - } - dgst, err := c.ids.Lookup(ref) - if err != nil { - return imageLink{}, errdefs.NotFound(errors.New("reference not found")) - } - ci, ok := c.idCache[dgst] - if !ok { - return imageLink{}, errdefs.NotFound(errors.New("id not found")) - } - return imageLink{ - cached: ci, - }, nil - } - ci, ok := c.tCache[img.Target.Digest] - if !ok { - // TODO(containerd): Update cache and return - return imageLink{}, errdefs.NotFound(errors.New("id not found")) - } - - return imageLink{ - name: namedRef, - target: &img.Target, - cached: ci, - }, nil -} - -func (i *ImageService) updateCache(ctx context.Context, img imageLink) error { - c, err := 
i.getCache(ctx) - if err != nil { - return err - } - - img.cached.m.Lock() - img.cached.addReference(img.name) - img.cached.m.Unlock() - - var parent *cachedImage - - c.m.Lock() - if _, ok := c.tCache[img.target.Digest]; !ok { - c.tCache[img.target.Digest] = img.cached - } - if _, ok := c.idCache[img.cached.config.Digest]; !ok { - c.idCache[img.cached.config.Digest] = img.cached - c.ids.Add(img.cached.config.Digest) - } - if img.cached.parent != "" { - parent = c.idCache[img.cached.parent] - } - c.m.Unlock() - - if parent != nil { - parent.m.Lock() - parent.addChild(img.cached.config.Digest) - parent.m.Unlock() - } - - return nil -} diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index 0d79914ae083c..1a513307a70c8 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -2,9 +2,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" - "encoding/json" - "github.com/containerd/containerd/content" "github.com/docker/docker/api/types/events" ) @@ -34,25 +32,9 @@ func (i *ImageService) LogImageEvent(ctx context.Context, imageID, refName, acti } func (i *ImageService) getImageLabels(ctx context.Context, imageID string) (map[string]string, error) { - img, err := i.GetImage(ctx, imageID) - if err != nil { - return nil, err - } - - p, err := content.ReadBlob(ctx, i.client.ContentStore(), img) - if err != nil { - return nil, err - } - - var config struct { - Config struct { - Labels map[string]string - } - } - - if err := json.Unmarshal(p, &config); err != nil { - return nil, err - } - - return config.Config.Labels, nil + return nil, nil + // TODO(containerd): why is this expensive operation necessary + // would require resolving imageID to manifest, then reading + // and unmarshalling the config, this would also require + // resolving manifest list if imageID is a manifest list } diff --git a/daemon/images/image_inspect.go b/daemon/images/image_inspect.go index c6674f8cf358b..c9483af98ad6f 
100644 --- a/daemon/images/image_inspect.go +++ b/daemon/images/image_inspect.go @@ -57,7 +57,7 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima config, err := images.Config(ctx, cs, desc, i.platforms) if err != nil { - // TODO(containerd): handle case where config fails to resume + // TODO(containerd): handle case where config fails to resolve // due to missing data caused by multiple matches return nil, errors.Wrap(err, "failed to resolve config") } diff --git a/daemon/images/service.go b/daemon/images/service.go index cdce223d5c98f..2164cee59a5f7 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -8,7 +8,6 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" @@ -22,7 +21,6 @@ import ( dockerreference "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -298,66 +296,37 @@ func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, driver string) error // LayerDiskUsage returns the number of bytes used by layer stores // called from disk_usage.go func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { - var allLayersSize int64 - layerRefs, err := i.getLayerRefs(ctx) - if err != nil { - return 0, err - } - for _, ls := range i.layerStores { - allLayers := ls.Map() - for _, l := range allLayers { - select { - case <-ctx.Done(): - return allLayersSize, ctx.Err() - default: - size, err := l.DiffSize() - if err == nil { - if _, ok := layerRefs[digest.Digest(l.ChainID())]; ok { - allLayersSize += size - } - } else { - logrus.Warnf("failed to get diff size for layer 
%v", l.ChainID()) - } - } - } - } - return allLayersSize, nil -} - -func (i *ImageService) getLayerRefs(ctx context.Context) (map[digest.Digest]int, error) { c, err := i.getCache(ctx) if err != nil { - return nil, err + return 0, err } - // Create copy and unlock cache + var layers []layer.Layer c.m.RLock() - imgs := make(map[digest.Digest]*cachedImage, len(c.idCache)) - for dgst, ci := range c.idCache { - imgs[dgst] = ci + for _, lm := range c.layers { + for _, l := range lm { + layers = append(layers, l) + } } c.m.RUnlock() - layerRefs := map[digest.Digest]int{} - for _, img := range imgs { - if len(img.references) == 0 && len(img.children) != 0 { - continue - } + // TODO(containerd): Get from containerd snapshotters also - diffIDs, err := images.RootFS(ctx, i.client.ContentStore(), img.config) - if err != nil { - if errdefs.IsNotFound(err) { - continue + var allLayersSize int64 + for _, l := range layers { + select { + case <-ctx.Done(): + return allLayersSize, ctx.Err() + default: + size, err := l.DiffSize() + if err == nil { + allLayersSize += size + } else { + logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) } - return nil, errors.Wrap(err, "failed to resolve rootfs") - } - - for i := range diffIDs { - layerRefs[identity.ChainID(diffIDs[:i+1])]++ } } - - return layerRefs, nil + return allLayersSize, nil } // UpdateConfig values diff --git a/daemon/list.go b/daemon/list.go index d03edfa79faee..26e238b8a2cb9 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -318,7 +318,7 @@ func (daemon *Daemon) foldFilter(ctx context.Context, view container.View, confi if psFilters.Contains("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - img, err := daemon.imageService.GetImage(ctx, ancestor) + img, err := daemon.imageService.ResolveImage(ctx, ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil @@ -585,7 +585,7 @@ func (daemon *Daemon) refreshImage(s 
*container.Snapshot, ctx *listContext) (*ty c := s.Container updated := s.Image // keep the original ref if still valid (hasn't changed) if updated != s.ImageID { - img, err := daemon.imageService.GetImage(context.TODO(), updated) + img, err := daemon.imageService.ResolveImage(context.TODO(), updated) if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { return nil, err } From d02df560f5eef27874483ae1d4b2f432618bcc84 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 17:36:24 -0700 Subject: [PATCH 45/73] Fix docker images filters Support dangling only and reference lookups Signed-off-by: Derek McGowan --- daemon/images/images.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/daemon/images/images.go b/daemon/images/images.go index b76f6267516ac..161ef7b2a3299 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -94,13 +94,20 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al } var filters []string - if danglingOnly { - filters = append(filters, "name~=/sha256:[a-z0-9]+/") - } else if imageFilters.Contains("reference") { + if imageFilters.Contains("reference") { for _, v := range imageFilters.Get("reference") { - // TODO(containerd): Parse reference, if only partial match then - // use as regex - filters = append(filters, "name=="+v) + named, err := reference.ParseNormalizedNamed(v) + if err != nil { + return nil, invalidFilter{"reference", v} + } + + // TODO(containerd): handle canonical names + if nt, ok := named.(reference.NamedTagged); ok { + filters = append(filters, "name=="+nt.String()) + } else { + escaped := strings.Replace(named.Name(), "/", "\\/", -1) + filters = append(filters, fmt.Sprintf("name~=/%s:.*/", escaped)) + } } } @@ -323,8 +330,8 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al } if len(newImage.RepoTags) == 0 { - // TODO(containerd): also skip if has children - if !all { + // 
TODO(containerd): also skip if has children? + if !all && !danglingOnly { continue } @@ -343,6 +350,8 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al newImage.RepoTags = []string{"none@none"} } newImage.RepoTags = []string{"none:none"} + } else if danglingOnly { + continue } imageSums = append(imageSums, newImage) From c9d736985fd1e96404344638c466c140384ea42c Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 Mar 2019 17:45:04 -0700 Subject: [PATCH 46/73] Optimize image list to skip size on skipped images Signed-off-by: Derek McGowan --- daemon/images/images.go | 208 ++++++++++++++++++---------------------- 1 file changed, 91 insertions(+), 117 deletions(-) diff --git a/daemon/images/images.go b/daemon/images/images.go index 161ef7b2a3299..5e9ff0397ef3d 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -159,74 +159,10 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al m[img.Target.Digest] = append(m[img.Target.Digest], img) created[img.Target.Digest] = info.CreatedAt - //var size int64 - // TODO: this seems pretty dumb to do - // Maybe we resolve a config and add size as a config label - //layerID := img.RootFS.ChainID() - //if layerID != "" { - // l, err := i.layerStores[img.OperatingSystem()].Get(layerID) - // if err != nil { - // // The layer may have been deleted between the call to `Map()` or - // // `Heads()` and the call to `Get()`, so we just ignore this error - // if err == layer.ErrLayerDoesNotExist { - // continue - // } - // return nil, err - // } - - // size, err = l.Size() - // layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) - // if err != nil { - // return nil, err - // } - //} - - //newImage := newImage(img, size) - // TODO: Resolve config blob to get extra metadata // TODO: Store by target // TODO: Defer creation of image summary - //if withExtraAttrs { - // // lazily init variables - // if imagesMap == nil { - // allContainers = 
i.containers.List() - - // // allLayers is built from all layerstores combined - // allLayers = make(map[layer.ChainID]layer.Layer) - // for _, ls := range i.layerStores { - // layers := ls.Map() - // for k, v := range layers { - // allLayers[k] = v - // } - // } - // imagesMap = make(map[*image.Image]*types.ImageSummary) - // layerRefs = make(map[layer.ChainID]int) - // } - - // // Get container count - // newImage.Containers = 0 - // for _, c := range allContainers { - // if c.ImageID == id { - // newImage.Containers++ - // } - // } - - // // count layer references - // rootFS := *img.RootFS - // rootFS.DiffIDs = nil - // for _, id := range img.RootFS.DiffIDs { - // rootFS.Append(id) - // chid := rootFS.ChainID() - // layerRefs[chid]++ - // if _, ok := allLayers[chid]; !ok { - // return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) - // } - // } - // imagesMap[img] = newImage - //} - - //images = append(images, newImage) } imageSums := []*types.ImageSummary{} @@ -239,6 +175,62 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al newImage.ID = dgst.String() newImage.Created = created[dgst].Unix() + // For these, unique them by manifest, none:none or none@digest + digests := map[string]struct{}{} + tags := map[string]struct{}{} + + for _, img := range imgs { + ref, err := reference.Parse(img.Name) + if err != nil { + if strings.HasPrefix(img.Name, "<") { + if idx := strings.Index(img.Name, ">@"); idx > 0 { + digests["none"+img.Name[idx+1:]] = struct{}{} + } + } + // TODO(containerd): Check for format such as @ + continue + } + if named, ok := ref.(reference.Named); ok { + if c, ok := named.(reference.Canonical); ok { + digests[reference.FamiliarString(c)] = struct{}{} + } else if t, ok := named.(reference.Tagged); ok { + tags[reference.FamiliarString(t)] = struct{}{} + } + } + } + + for d := range digests { + newImage.RepoDigests = append(newImage.RepoDigests, d) + } + for t := range tags { + newImage.RepoTags = 
append(newImage.RepoTags, t) + } + + if len(newImage.RepoTags) == 0 { + // TODO(containerd): also skip if has children? + if !all && !danglingOnly { + continue + } + + if imageFilters.Contains("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + + if imageFilters.Contains("reference") { // skip images with no references if filtering by reference + continue + } + + if len(newImage.RepoDigests) == 0 { + // TODO(containerd): Requires querying content store directly, + // not currently possible + newImage.RepoTags = []string{"none@none"} + } + newImage.RepoTags = []string{"none:none"} + } else if danglingOnly { + continue + } + var target = imgs[0].Target var config ocispec.Descriptor @@ -297,62 +289,44 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al log.G(ctx).WithError(err).WithField("digest", config.Digest.String()).Warnf("unable to get image config info") } - // TODO: Add each image reference - // For these, unique them by manifest, none:none or none@digest - digests := map[string]struct{}{} - tags := map[string]struct{}{} - - for _, img := range imgs { - ref, err := reference.Parse(img.Name) - if err != nil { - if strings.HasPrefix(img.Name, "<") { - if idx := strings.Index(img.Name, ">@"); idx > 0 { - digests["none"+img.Name[idx+1:]] = struct{}{} - } - } - // TODO(containerd): Check for format such as @ - continue - } - if named, ok := ref.(reference.Named); ok { - if c, ok := named.(reference.Canonical); ok { - digests[reference.FamiliarString(c)] = struct{}{} - } else if t, ok := named.(reference.Tagged); ok { - tags[reference.FamiliarString(t)] = struct{}{} - } - } - } - - for d := range digests { - newImage.RepoDigests = append(newImage.RepoDigests, d) - } - for t := range tags { - newImage.RepoTags = append(newImage.RepoTags, t) - } - - if len(newImage.RepoTags) == 0 { - // TODO(containerd): also skip if has children? 
- if !all && !danglingOnly { - continue - } + //if withExtraAttrs { + // // lazily init variables + // if imagesMap == nil { + // allContainers = i.containers.List() - if imageFilters.Contains("dangling") && !danglingOnly { - //dangling=false case, so dangling image is not needed - continue - } + // // allLayers is built from all layerstores combined + // allLayers = make(map[layer.ChainID]layer.Layer) + // for _, ls := range i.layerStores { + // layers := ls.Map() + // for k, v := range layers { + // allLayers[k] = v + // } + // } + // imagesMap = make(map[*image.Image]*types.ImageSummary) + // layerRefs = make(map[layer.ChainID]int) + // } - if imageFilters.Contains("reference") { // skip images with no references if filtering by reference - continue - } + // // Get container count + // newImage.Containers = 0 + // for _, c := range allContainers { + // if c.ImageID == id { + // newImage.Containers++ + // } + // } - if len(newImage.RepoDigests) == 0 { - // TODO(containerd): Requires querying content store directly, - // not currently possible - newImage.RepoTags = []string{"none@none"} - } - newImage.RepoTags = []string{"none:none"} - } else if danglingOnly { - continue - } + // // count layer references + // rootFS := *img.RootFS + // rootFS.DiffIDs = nil + // for _, id := range img.RootFS.DiffIDs { + // rootFS.Append(id) + // chid := rootFS.ChainID() + // layerRefs[chid]++ + // if _, ok := allLayers[chid]; !ok { + // return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + // } + // } + // imagesMap[img] = newImage + //} imageSums = append(imageSums, newImage) } From a3548192fa585bde7b0cd7f424c451f96978453f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 21 Mar 2019 14:40:33 -0700 Subject: [PATCH 47/73] Fix image lookup and listing Ensure ambiguous reference is not mistakenly returned. 
Cleanup TODOs in listing and resolve image Signed-off-by: Derek McGowan --- daemon/images/image.go | 48 +++++++++++++++++++++-------------------- daemon/images/images.go | 15 ++++++------- 2 files changed, 31 insertions(+), 32 deletions(-) diff --git a/daemon/images/image.go b/daemon/images/image.go index bbab1dc1a835d..e3913aca55f69 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "regexp" "sort" "github.com/containerd/containerd/content" @@ -30,6 +31,8 @@ const ( LabelLayerPrefix = "docker.io/layer." ) +var shortID = regexp.MustCompile(`^([a-f0-9]{4,64})$`) + // ErrImageDoesNotExist is error returned when no image can be found for a reference. type ErrImageDoesNotExist struct { ref reference.Reference @@ -52,7 +55,7 @@ func (e ErrImageDoesNotExist) NotFound() {} func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { parsed, err := reference.ParseAnyReference(refOrID) if err != nil { - return ocispec.Descriptor{}, err + return ocispec.Descriptor{}, errdefs.InvalidParameter(err) } is := i.client.ImageService() @@ -75,37 +78,39 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe return imgs[0].Target, nil } - // TODO(containerd): If namedRef matches COULD be interpreted as a - // digest prefer, do a lookup via `is.List` instead - // with an or clause - // TODO(containerd): Ensure named only - ref := namedRef.String() - if len(refOrID) < 64 { + // If the identifier could be a short ID, attempt to match + if shortID.MatchString(refOrID) { filters := []string{ fmt.Sprintf("name==%q", namedRef.String()), - fmt.Sprintf(`target.digest~="sha256:%s[0-9a-fA-F]{%d}"`, refOrID, 64-len(refOrID)), + fmt.Sprintf(`target.digest~=/sha256:%s[0-9a-fA-F]{%d}/`, refOrID, 64-len(refOrID)), } imgs, err := is.List(ctx, filters...) 
if err != nil { return ocispec.Descriptor{}, err } - if len(imgs) == 1 { - return imgs[0].Target, nil - } + if len(imgs) == 0 { return ocispec.Descriptor{}, errdefs.NotFound(errors.New("list returned no images")) } - for _, img := range imgs { - if img.Name == ref { - return img.Target, nil + if len(imgs) > 1 { + ref := namedRef.String() + digests := map[digest.Digest]struct{}{} + for _, img := range imgs { + if img.Name == ref { + return img.Target, nil + } + digests[img.Target.Digest] = struct{}{} } - } - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("ambiguous reference")) + if len(digests) > 1 { + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("ambiguous reference")) + } + } + return imgs[0].Target, nil } img, err := is.Get(ctx, namedRef.String()) if err != nil { - // TODO(containerd): Translate error directly + // TODO(containerd): error translation can use common function if !cerrdefs.IsNotFound(err) { return ocispec.Descriptor{}, err } @@ -276,6 +281,7 @@ func (i *ImageService) runtimeImages(ctx context.Context, image ocispec.Descript // GetImage returns an image corresponding to the image referred to by refOrID. // Deprecated: Use (i *ImageService).GetImage instead. 
+// TODO(containerd): remove this function and replace with ResolveImage func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { ref, err := reference.ParseAnyReference(refOrID) if err != nil { @@ -312,8 +318,7 @@ func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { //} return nil, ErrImageDoesNotExist{ref} } else { - // TODO: Choose correct platform - d, err := images.Config(context.TODO(), cs, img.Target, platforms.Default()) + d, err := images.Config(context.TODO(), cs, img.Target, i.platforms) if err != nil { if errdefs.IsNotFound(err) { return nil, ErrImageDoesNotExist{ref} @@ -325,7 +330,6 @@ func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { } } - // TODO(containerd): Move the reference setting and resolution img, err := i.getImage(context.TODO(), target) if err != nil { if errdefs.IsNotFound(err) { @@ -338,11 +342,10 @@ func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { return img, nil } -// TODO(containerd): remove or replace this function to return local type +// TODO(containerd): remove this function and replace with ResolveImage func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) (*image.Image, error) { cs := i.client.ContentStore() - // TODO(containerd): If not config, resolve b, err := content.ReadBlob(ctx, cs, target) if err != nil { return nil, errors.Wrap(err, "unable to read target blob") @@ -353,7 +356,6 @@ func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) return nil, errors.Wrap(err, "unable to unmarshal image config") } - // TODO(containerd): read labels from blob to get parent and Docker calculated size return &image.Image{ Config: target, Image: &img, diff --git a/daemon/images/images.go b/daemon/images/images.go index 5e9ff0397ef3d..c9c215b2d64aa 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -101,7 +101,7 @@ func (i *ImageService) Images(ctx context.Context, 
imageFilters filters.Args, al return nil, invalidFilter{"reference", v} } - // TODO(containerd): handle canonical names + // TODO(containerd): handle canonical names (tag + digest) if nt, ok := named.(reference.NamedTagged); ok { filters = append(filters, "name=="+nt.String()) } else { @@ -182,15 +182,15 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al for _, img := range imgs { ref, err := reference.Parse(img.Name) if err != nil { + // Handle formats such as @sha256:... if strings.HasPrefix(img.Name, "<") { if idx := strings.Index(img.Name, ">@"); idx > 0 { digests["none"+img.Name[idx+1:]] = struct{}{} + continue } } - // TODO(containerd): Check for format such as @ - continue - } - if named, ok := ref.(reference.Named); ok { + log.G(ctx).WithError(err).WithField("name", img.Name).Debug("skipping image with unknown format") + } else if named, ok := ref.(reference.Named); ok { if c, ok := named.(reference.Canonical); ok { digests[reference.FamiliarString(c)] = struct{}{} } else if t, ok := named.(reference.Tagged); ok { @@ -207,7 +207,6 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al } if len(newImage.RepoTags) == 0 { - // TODO(containerd): also skip if has children? 
if !all && !danglingOnly { continue } @@ -222,8 +221,6 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al } if len(newImage.RepoDigests) == 0 { - // TODO(containerd): Requires querying content store directly, - // not currently possible newImage.RepoTags = []string{"none@none"} } newImage.RepoTags = []string{"none:none"} @@ -247,7 +244,6 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al config = desc } - // TODO(containerd): Stat config if info, err := cs.Info(ctx, config.Digest); err == nil { var sizeSet bool var size int64 @@ -289,6 +285,7 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al log.G(ctx).WithError(err).WithField("digest", config.Digest.String()).Warnf("unable to get image config info") } + // TODO(containerd): Support extra attrs //if withExtraAttrs { // // lazily init variables // if imagesMap == nil { From 3778719cab1b5db1e01ce8e18485bfb408c838c4 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 21 Mar 2019 16:54:29 -0700 Subject: [PATCH 48/73] Fix unit tests and lint Signed-off-by: Derek McGowan --- api/server/backend/build/backend.go | 2 +- daemon/container_unix_test.go | 3 +- daemon/daemon.go | 4 +- daemon/daemon_linux_test.go | 3 +- daemon/daemon_test.go | 3 +- daemon/images/generators_test.go | 3 +- daemon/images/image.go | 24 +- daemon/images/image_delete_test.go | 2 +- daemon/images/image_import.go | 3 +- daemon/images/image_inspect.go | 8 +- daemon/images/image_pull.go | 16 +- daemon/images/image_tag.go | 2 +- daemon/images/service.go | 3 +- daemon/images/service_test.go | 23 +- daemon/reload_test.go | 25 ++- .../containerd/archive/tartest/tar.go | 210 ++++++++++++++++++ 16 files changed, 273 insertions(+), 61 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/archive/tartest/tar.go diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go index 2ca984ee123ac..02b3285bbf69b 100644 --- 
a/api/server/backend/build/backend.go +++ b/api/server/backend/build/backend.go @@ -21,7 +21,7 @@ import ( // ImageComponent provides an interface for working with images type ImageComponent interface { SquashImage(from string, to string) (string, error) - TagImageWithReference(context.Context, ocispec.Descriptor, reference.Named) error + TagImageWithReference(context.Context, ocispec.Descriptor, reference.Reference) error } // Builder defines interface for running a build diff --git a/daemon/container_unix_test.go b/daemon/container_unix_test.go index b4c5f84c7e794..7bad4e7b799d0 100644 --- a/daemon/container_unix_test.go +++ b/daemon/container_unix_test.go @@ -5,6 +5,7 @@ package daemon import ( "testing" + "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/daemon/config" @@ -37,7 +38,7 @@ func TestContainerWarningHostAndPublishPorts(t *testing.T) { }, } d := &Daemon{configStore: cs} - wrns, err := d.verifyContainerSettings("", hostConfig, &containertypes.Config{}, false) + wrns, err := d.verifyContainerSettings(platforms.DefaultSpec(), hostConfig, &containertypes.Config{}, false) assert.NilError(t, err) assert.DeepEqual(t, tc.warnings, wrns) } diff --git a/daemon/daemon.go b/daemon/daemon.go index 7db59c8fce933..37e9795e0e8b9 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -948,10 +948,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.graphDrivers = make(map[string]string) for _, driver := range storageDrivers { ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ - Root: config.Root, - MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + Root: config.Root, GraphDriver: driver.name, GraphDriverOptions: config.GraphOptions, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), IDMapping: idMapping, PluginGetter: d.PluginStore, 
ExperimentalEnabled: config.Experimental, diff --git a/daemon/daemon_linux_test.go b/daemon/daemon_linux_test.go index e26c08f24e0f8..77ba4f994fac2 100644 --- a/daemon/daemon_linux_test.go +++ b/daemon/daemon_linux_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/containerd/containerd/platforms" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/pkg/mount" @@ -115,7 +116,7 @@ func TestNotCleanupMounts(t *testing.T) { func TestValidateContainerIsolationLinux(t *testing.T) { d := Daemon{} - _, err := d.verifyContainerSettings("linux", &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false) + _, err := d.verifyContainerSettings(platforms.DefaultSpec(), &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false) assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux")) } diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go index d89b0077fd6e2..7e1bec10d8d62 100644 --- a/daemon/daemon_test.go +++ b/daemon/daemon_test.go @@ -7,6 +7,7 @@ import ( "runtime" "testing" + "github.com/containerd/containerd/platforms" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" @@ -305,7 +306,7 @@ func TestMerge(t *testing.T) { func TestValidateContainerIsolation(t *testing.T) { d := Daemon{} - _, err := d.verifyContainerSettings(runtime.GOOS, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false) + _, err := d.verifyContainerSettings(platforms.DefaultSpec(), &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false) assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS)) } diff --git a/daemon/images/generators_test.go b/daemon/images/generators_test.go index 0111080a7a703..06b65f1202457 100644 --- a/daemon/images/generators_test.go +++ 
b/daemon/images/generators_test.go @@ -15,7 +15,6 @@ import ( "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/vmihailenco/bufio" ) type ingest func(context.Context, content.Store) error @@ -88,7 +87,7 @@ func withLayers(layers ...tartest.WriterToTar) manifestOpt { return func(m *ocispec.Manifest) ingest { var ingests []ingest for _, l := range layers { - br := bufio.NewBuffer(nil) + br := bytes.NewBuffer(nil) dgstr := digest.Canonical.Digester() cw, err := compression.CompressStream(br, compression.Gzip) if err != nil { diff --git a/daemon/images/image.go b/daemon/images/image.go index e3913aca55f69..15b9a85dcb02d 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -49,7 +49,7 @@ func (e ErrImageDoesNotExist) Error() string { // NotFound implements the NotFound interface func (e ErrImageDoesNotExist) NotFound() {} -// SearchImage searches for an image based on the given +// ResolveImage searches for an image based on the given // reference or identifier. Returns the descriptor of // the image, could be manifest list, manifest, or config. 
func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { @@ -236,10 +236,9 @@ func (i *ImageService) runtimeImages(ctx context.Context, image ocispec.Descript image.Config = manifest.Config runtimeImages = append(runtimeImages, image) return nil, nil - } else { - // Map config to the runtime image - imageMap[manifest.Config.Digest] = image } + // Map config to the runtime image + imageMap[manifest.Config.Digest] = image } else { imageMap[manifest.Config.Digest] = RuntimeImage{ Target: desc, @@ -317,17 +316,16 @@ func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { // return img, nil //} return nil, ErrImageDoesNotExist{ref} - } else { - d, err := images.Config(context.TODO(), cs, img.Target, i.platforms) - if err != nil { - if errdefs.IsNotFound(err) { - return nil, ErrImageDoesNotExist{ref} - } - return nil, errors.Wrap(err, "unable to resolve image") + } + d, err := images.Config(context.TODO(), cs, img.Target, i.platforms) + if err != nil { + if errdefs.IsNotFound(err) { + return nil, ErrImageDoesNotExist{ref} } - target = d - references = append(references, img.Target) + return nil, errors.Wrap(err, "unable to resolve image") } + target = d + references = append(references, img.Target) } img, err := i.getImage(context.TODO(), target) diff --git a/daemon/images/image_delete_test.go b/daemon/images/image_delete_test.go index d390d1b5d6aa5..c40b4be2a44f8 100644 --- a/daemon/images/image_delete_test.go +++ b/daemon/images/image_delete_test.go @@ -245,7 +245,7 @@ func testDeleteImages(ctx context.Context, t *testing.T, is *ImageService) { LabelImageParent: parentImg.config.String(), }, } - info, err := cs.Update(ctx, info, "labels."+LabelImageParent) + _, err := cs.Update(ctx, info, "labels."+LabelImageParent) if err != nil { t.Fatal(err) } diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index d05d1681e9425..88aa4f3c94f30 100644 --- a/daemon/images/image_import.go +++ 
b/daemon/images/image_import.go @@ -51,7 +51,8 @@ func (i *ImageService) ImportImage(ctx context.Context, src string, repository, } if tag != "" { - newRef, err = reference.WithTag(newRef, tag) + //set newRef + _, err = reference.WithTag(newRef, tag) if err != nil { return errdefs.InvalidParameter(err) } diff --git a/daemon/images/image_inspect.go b/daemon/images/image_inspect.go index c9483af98ad6f..4d88d0c17d9eb 100644 --- a/daemon/images/image_inspect.go +++ b/daemon/images/image_inspect.go @@ -133,7 +133,7 @@ func (i *ImageService) LookupImage(ctx context.Context, name string) (*types.Ima Created: img.Created.Format(time.RFC3339Nano), DockerVersion: img.DockerVersion, Author: img.Author, - Config: configToApiType(img.Config), + Config: configToAPIType(img.Config), Architecture: img.Architecture, Os: img.OS, OsVersion: img.OSVersion, @@ -161,10 +161,10 @@ func rootFSToAPIType(rootfs ocispec.RootFS) types.RootFS { } } -func configToApiType(c imageConfig) *containertype.Config { +func configToAPIType(c imageConfig) *containertype.Config { return &containertype.Config{ User: c.User, - ExposedPorts: portSetToApiType(c.ExposedPorts), + ExposedPorts: portSetToAPIType(c.ExposedPorts), Env: c.Env, WorkingDir: c.WorkingDir, Labels: c.Labels, @@ -182,7 +182,7 @@ func configToApiType(c imageConfig) *containertype.Config { } } -func portSetToApiType(ports map[string]struct{}) nat.PortSet { +func portSetToAPIType(ports map[string]struct{}) nat.PortSet { ps := nat.PortSet{} for p := range ports { ps[nat.Port(p)] = struct{}{} diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 3508987b27a12..3c276f939e4e3 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -89,12 +89,12 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference }) var ( - layers = map[digest.Digest][]ocispec.Descriptor{} - dlStatus = map[digest.Digest]bool{} - unpackDesc = map[digest.Digest]struct{}{} - unpacks int32 = 0 // how many 
unpacks occurred - lock = sync.Mutex{} - cond = sync.NewCond(&lock) + layers = map[digest.Digest][]ocispec.Descriptor{} + dlStatus = map[digest.Digest]bool{} + unpackDesc = map[digest.Digest]struct{}{} + unpacks int32 // how many unpacks occurred + lock = sync.Mutex{} + cond = sync.NewCond(&lock) ) grp, pctx := errgroup.WithContext(pctx) @@ -342,10 +342,10 @@ func (i *ImageService) applyLayer(ctx context.Context, ls layer.Store, blob ocis defer ra.Close() rc := ioutil.NopCloser(content.NewReader(ra)) - blobId := stringid.TruncateID(blob.Digest.String()) + blobID := stringid.TruncateID(blob.Digest.String()) reader := ioutils.NewCancelReadCloser(ctx, rc) if progressOutput != nil { - reader = progress.NewProgressReader(reader, progressOutput, blob.Size, blobId, "Extracting") + reader = progress.NewProgressReader(reader, progressOutput, blob.Size, blobID, "Extracting") } defer reader.Close() diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index e8ca8ff676f13..c78deee08f816 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -32,7 +32,7 @@ func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag } // TagImageWithReference adds the given reference to the image ID provided. 
-func (i *ImageService) TagImageWithReference(ctx context.Context, target ocispec.Descriptor, newTag reference.Named) error { +func (i *ImageService) TagImageWithReference(ctx context.Context, target ocispec.Descriptor, newTag reference.Reference) error { im := images.Image{ Name: newTag.String(), Target: target, diff --git a/daemon/images/service.go b/daemon/images/service.go index 2164cee59a5f7..b0bf3c36ba145 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -35,6 +35,7 @@ type containerStore interface { Get(string) *container.Container } +// LayerBackend represents a layer storage backend along with its platform type LayerBackend struct { layer.Store Platform platforms.Matcher @@ -269,7 +270,7 @@ func (i *ImageService) Cleanup() { } } -// GraphDriverForOS returns the name of the graph driver for the given platform +// DriverName returns the name of the graph driver for the given platform func (i *ImageService) DriverName(p ocispec.Platform) string { ls, err := i.getLayerStore(p) if err != nil { diff --git a/daemon/images/service_test.go b/daemon/images/service_test.go index 5d455f97823e7..3b612d308d6a1 100644 --- a/daemon/images/service_test.go +++ b/daemon/images/service_test.go @@ -15,31 +15,30 @@ import ( imagessrv "github.com/containerd/containerd/api/services/images/v1" namespacessrv "github.com/containerd/containerd/api/services/namespaces/v1" "github.com/containerd/containerd/content" + _ "github.com/containerd/containerd/diff/walking/plugin" "github.com/containerd/containerd/events/exchange" + _ "github.com/containerd/containerd/gc/scheduler" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/services" + _ "github.com/containerd/containerd/services/containers" + _ "github.com/containerd/containerd/services/content" + _ 
"github.com/containerd/containerd/services/diff" + _ "github.com/containerd/containerd/services/images" + _ "github.com/containerd/containerd/services/leases" + _ "github.com/containerd/containerd/services/namespaces" "github.com/containerd/containerd/services/server" srvconfig "github.com/containerd/containerd/services/server/config" + _ "github.com/containerd/containerd/services/snapshots" "github.com/containerd/containerd/snapshots" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/pkg/errors" "github.com/sirupsen/logrus" - - _ "github.com/containerd/containerd/diff/walking/plugin" - _ "github.com/containerd/containerd/gc/scheduler" - _ "github.com/containerd/containerd/services/containers" - _ "github.com/containerd/containerd/services/content" - _ "github.com/containerd/containerd/services/diff" - _ "github.com/containerd/containerd/services/images" - _ "github.com/containerd/containerd/services/leases" - _ "github.com/containerd/containerd/services/namespaces" - _ "github.com/containerd/containerd/services/snapshots" ) var ( @@ -205,9 +204,9 @@ func setupTest(ctx context.Context, root string, service containerd.ClientOpt, f t.Fatal(err) } ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ - Root: root, - MetadataStorePathTemplate: filepath.Join(root, "layerdb"), + Root: root, GraphDriver: testgraphdriver, + MetadataStorePathTemplate: filepath.Join(root, "layerdb"), IDMapping: idMapping, OS: platform.OS, }) diff --git a/daemon/reload_test.go b/daemon/reload_test.go index ffad297f71b74..02e479d986f19 100644 --- a/daemon/reload_test.go +++ b/daemon/reload_test.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "os" "reflect" "sort" @@ -36,7 +37,7 @@ func TestDaemonReloadLabels(t *testing.T) { }, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ 
-86,7 +87,7 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { }, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ -163,7 +164,7 @@ func TestDaemonReloadMirrors(t *testing.T) { }, } - err := daemon.Reload(newConfig) + err := daemon.Reload(context.TODO(), newConfig) if !value.valid && err == nil { // mirrors should be invalid, should be a non-nil error t.Fatalf("Expected daemon reload error with invalid mirrors: %s, while get nil", value.mirrors) @@ -242,7 +243,7 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) { }, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ -313,7 +314,7 @@ func TestDaemonReloadNotAffectOthers(t *testing.T) { }, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ -382,7 +383,7 @@ func TestDaemonDiscoveryReload(t *testing.T) { &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ -427,7 +428,7 @@ func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ -474,7 +475,7 @@ func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.Reload(context.TODO(), newConfig); err != nil { t.Fatal(err) } @@ -533,7 +534,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) { // Enable/Disable the server for some iterations for i := 0; i < 10; i++ { enableConfig.CommonConfig.NetworkDiagnosticPort++ - if err := 
daemon.Reload(enableConfig); err != nil { + if err := daemon.Reload(context.TODO(), enableConfig); err != nil { t.Fatal(err) } // Check that the diagnostic is enabled @@ -542,7 +543,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) { } // Reload - if err := daemon.Reload(disableConfig); err != nil { + if err := daemon.Reload(context.TODO(), disableConfig); err != nil { t.Fatal(err) } // Check that the diagnostic is disabled @@ -553,7 +554,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) { enableConfig.CommonConfig.NetworkDiagnosticPort++ // 2 times the enable should not create problems - if err := daemon.Reload(enableConfig); err != nil { + if err := daemon.Reload(context.TODO(), enableConfig); err != nil { t.Fatal(err) } // Check that the diagnostic is enabled @@ -562,7 +563,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) { } // Check that another reload does not cause issues - if err := daemon.Reload(enableConfig); err != nil { + if err := daemon.Reload(context.TODO(), enableConfig); err != nil { t.Fatal(err) } // Check that the diagnostic is enable diff --git a/vendor/github.com/containerd/containerd/archive/tartest/tar.go b/vendor/github.com/containerd/containerd/archive/tartest/tar.go new file mode 100644 index 0000000000000..a754a51925257 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tartest/tar.go @@ -0,0 +1,210 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package tartest + +import ( + "archive/tar" + "errors" + "io" + "os" + "time" +) + +// WriterToTar is an type which writes to a tar writer +type WriterToTar interface { + WriteTo(*tar.Writer) error +} + +type writerToFn func(*tar.Writer) error + +func (w writerToFn) WriteTo(tw *tar.Writer) error { + return w(tw) +} + +// TarAll creates a WriterToTar which calls all the provided writers +// in the order in which they are provided. +func TarAll(wt ...WriterToTar) WriterToTar { + return writerToFn(func(tw *tar.Writer) error { + for _, w := range wt { + if err := w.WriteTo(tw); err != nil { + return err + } + } + return nil + }) +} + +// TarFromWriterTo is used to create a tar stream from a tar record +// creator. This can be used to manufacture more specific tar records +// which allow testing specific tar cases which may be encountered +// by the untar process. +func TarFromWriterTo(wt WriterToTar) io.ReadCloser { + r, w := io.Pipe() + go func() { + tw := tar.NewWriter(w) + if err := wt.WriteTo(tw); err != nil { + w.CloseWithError(err) + return + } + w.CloseWithError(tw.Close()) + }() + + return r +} + +// TarContext is used to create tar records +type TarContext struct { + UID int + GID int + + // ModTime sets the modtimes for all files, if nil the current time + // is used for each file when it was written + ModTime *time.Time + + Xattrs map[string]string +} + +func (tc TarContext) newHeader(mode os.FileMode, name, link string, size int64) *tar.Header { + ti := tarInfo{ + name: name, + mode: mode, + size: size, + modt: tc.ModTime, + hdr: &tar.Header{ + Uid: tc.UID, + Gid: tc.GID, + Xattrs: tc.Xattrs, + }, + } + + if mode&os.ModeSymlink == 0 && link != "" { + ti.hdr.Typeflag = tar.TypeLink + ti.hdr.Linkname = link + } + + hdr, err := tar.FileInfoHeader(ti, link) + if err != nil { + // Only returns an error on bad input mode + panic(err) + } + + return hdr +} + +type tarInfo struct { + name string + mode os.FileMode + size int64 + modt *time.Time + hdr 
*tar.Header +} + +func (ti tarInfo) Name() string { + return ti.name +} + +func (ti tarInfo) Size() int64 { + return ti.size +} +func (ti tarInfo) Mode() os.FileMode { + return ti.mode +} + +func (ti tarInfo) ModTime() time.Time { + if ti.modt != nil { + return *ti.modt + } + return time.Now().UTC() +} + +func (ti tarInfo) IsDir() bool { + return (ti.mode & os.ModeDir) != 0 +} +func (ti tarInfo) Sys() interface{} { + return ti.hdr +} + +// WithUIDGID sets the UID and GID for tar entries +func (tc TarContext) WithUIDGID(uid, gid int) TarContext { + ntc := tc + ntc.UID = uid + ntc.GID = gid + return ntc +} + +// WithModTime sets the ModTime for tar entries +func (tc TarContext) WithModTime(modtime time.Time) TarContext { + ntc := tc + ntc.ModTime = &modtime + return ntc +} + +// WithXattrs adds these xattrs to all files, merges with any +// previously added xattrs +func (tc TarContext) WithXattrs(xattrs map[string]string) TarContext { + ntc := tc + if ntc.Xattrs == nil { + ntc.Xattrs = map[string]string{} + } + for k, v := range xattrs { + ntc.Xattrs[k] = v + } + return ntc +} + +// File returns a regular file tar entry using the provided bytes +func (tc TarContext) File(name string, content []byte, perm os.FileMode) WriterToTar { + return writerToFn(func(tw *tar.Writer) error { + return writeHeaderAndContent(tw, tc.newHeader(perm, name, "", int64(len(content))), content) + }) +} + +// Dir returns a directory tar entry +func (tc TarContext) Dir(name string, perm os.FileMode) WriterToTar { + return writerToFn(func(tw *tar.Writer) error { + return writeHeaderAndContent(tw, tc.newHeader(perm|os.ModeDir, name, "", 0), nil) + }) +} + +// Symlink returns a symlink tar entry +func (tc TarContext) Symlink(oldname, newname string) WriterToTar { + return writerToFn(func(tw *tar.Writer) error { + return writeHeaderAndContent(tw, tc.newHeader(0777|os.ModeSymlink, newname, oldname, 0), nil) + }) +} + +// Link returns a hard link tar entry +func (tc TarContext) Link(oldname, 
newname string) WriterToTar { + return writerToFn(func(tw *tar.Writer) error { + return writeHeaderAndContent(tw, tc.newHeader(0777, newname, oldname, 0), nil) + }) +} + +func writeHeaderAndContent(tw *tar.Writer, h *tar.Header, b []byte) error { + if h.Size != int64(len(b)) { + return errors.New("bad content length") + } + if err := tw.WriteHeader(h); err != nil { + return err + } + if len(b) > 0 { + if _, err := tw.Write(b); err != nil { + return err + } + } + return nil +} From 2fc1906d10f44fab096f48ffc54068e1447aaf99 Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Fri, 8 Mar 2019 15:46:27 -0800 Subject: [PATCH 49/73] use containerd client to create image during build Signed-off-by: Anda Xu --- api/types/backend/build.go | 9 ++ builder/builder.go | 3 +- builder/dockerfile/builder.go | 33 ++++--- builder/dockerfile/imagecontext.go | 41 ++++++++- builder/dockerfile/internals.go | 26 ++---- cmd/dockerd/daemon.go | 2 +- daemon/daemon.go | 5 ++ daemon/images/image_builder.go | 136 +++++++++++++++++++++++++++-- daemon/images/image_commit.go | 2 +- 9 files changed, 209 insertions(+), 48 deletions(-) diff --git a/api/types/backend/build.go b/api/types/backend/build.go index 1a2e59f2f7c88..a8674ec52cbb2 100644 --- a/api/types/backend/build.go +++ b/api/types/backend/build.go @@ -4,6 +4,7 @@ import ( "io" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/streamformatter" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -43,3 +44,11 @@ type GetImageAndLayerOptions struct { Output io.Writer Platform *specs.Platform } + +type NewImageConfig struct { + ParentImageID string + Author string + OS string + ContainerConfig *container.Config + Config *container.Config +} diff --git a/builder/builder.go b/builder/builder.go index 716b3fb4fb4fd..fe3d1141c9ed4 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" 
"github.com/docker/docker/pkg/containerfs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) const ( @@ -46,7 +47,7 @@ type Backend interface { // ContainerCreateWorkdir creates the workdir ContainerCreateWorkdir(containerID string) error - CreateImage(config []byte, parent string) (Image, error) + CreateImage(ctx context.Context, newImage backend.NewImageConfig, newROLayer ROLayer) (ocispec.Descriptor, error) ImageCacheBuilder } diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index a6094a8973f11..da043a34de617 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/containerd/containerd" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -56,21 +57,23 @@ type SessionGetter interface { // BuildManager is shared across all Builder objects type BuildManager struct { - idMapping *idtools.IdentityMapping - backend builder.Backend - pathCache pathCache // TODO: make this persistent - sg SessionGetter - fsCache *fscache.FSCache + idMapping *idtools.IdentityMapping + backend builder.Backend + pathCache pathCache // TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache + containerdCli *containerd.Client } // NewBuildManager creates a BuildManager -func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, identityMapping *idtools.IdentityMapping) (*BuildManager, error) { +func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, identityMapping *idtools.IdentityMapping, containerdCli *containerd.Client) (*BuildManager, error) { bm := &BuildManager{ - backend: b, - pathCache: &syncmap.Map{}, - sg: sg, - idMapping: identityMapping, - fsCache: fsCache, + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + idMapping: identityMapping, + fsCache: fsCache, + containerdCli: containerdCli, } if err := 
fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { return nil, err @@ -112,6 +115,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) ( Backend: bm.backend, PathCache: bm.pathCache, IDMapping: bm.idMapping, + containerdCli: bm.containerdCli, } b, err := newBuilder(ctx, builderOptions) if err != nil { @@ -160,6 +164,7 @@ type builderOptions struct { ProgressWriter backend.ProgressWriter PathCache pathCache IDMapping *idtools.IdentityMapping + containerdCli *containerd.Client } // Builder is a Dockerfile builder @@ -172,8 +177,9 @@ type Builder struct { Aux *streamformatter.AuxFormatter Output io.Writer - docker builder.Backend - clientCtx context.Context + docker builder.Backend + containerdCli *containerd.Client + clientCtx context.Context idMapping *idtools.IdentityMapping disableCommit bool @@ -199,6 +205,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) (*Builder, er Aux: options.ProgressWriter.AuxFormatter, Output: options.ProgressWriter.Output, docker: options.Backend, + containerdCli: options.containerdCli, idMapping: options.IDMapping, imageSources: newImageSources(clientCtx, options), pathCache: options.PathCache, diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go index 08cb396a2bdfb..ea7801a20f4c6 100644 --- a/builder/dockerfile/imagecontext.go +++ b/builder/dockerfile/imagecontext.go @@ -4,15 +4,18 @@ import ( "context" "runtime" + "github.com/containerd/containerd" + "github.com/containerd/containerd/content" "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" dockerimage "github.com/docker/docker/image" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -type getAndMountFunc func(string, bool, *specs.Platform) 
(builder.Image, builder.ROLayer, error) +type getAndMountFunc func(string, bool, *ocispec.Platform) (builder.Image, builder.ROLayer, error) // imageSources mounts images and provides a cache for mounted images. It tracks // all images so they can be unmounted at the end of the build. @@ -23,7 +26,7 @@ type imageSources struct { } func newImageSources(ctx context.Context, options builderOptions) *imageSources { - getAndMount := func(idOrRef string, localOnly bool, platform *specs.Platform) (builder.Image, builder.ROLayer, error) { + getAndMount := func(idOrRef string, localOnly bool, platform *ocispec.Platform) (builder.Image, builder.ROLayer, error) { pullOption := backend.PullOptionNoPull if !localOnly { if options.Options.PullParent { @@ -46,7 +49,7 @@ func newImageSources(ctx context.Context, options builderOptions) *imageSources } } -func (m *imageSources) Get(idOrRef string, localOnly bool, platform *specs.Platform) (*imageMount, error) { +func (m *imageSources) Get(idOrRef string, localOnly bool, platform *ocispec.Platform) (*imageMount, error) { if im, ok := m.byImageID[idOrRef]; ok { return im, nil } @@ -120,3 +123,33 @@ func (im *imageMount) NewRWLayer() (builder.RWLayer, error) { func (im *imageMount) ImageID() string { return im.image.ImageID() } + +type containerdImage struct { + desc ocispec.Descriptor + containerdCli *containerd.Client + config *container.Config +} + +func newContainerdImage(desc ocispec.Descriptor, client *containerd.Client, config *container.Config) *containerdImage { + return &containerdImage{desc: desc, containerdCli: client, config: config} +} + +func (ci *containerdImage) ImageID() string { + return ci.desc.Digest.String() +} + +func (ci *containerdImage) RunConfig() *container.Config { + return ci.config +} + +func (ci *containerdImage) OperatingSystem() string { + return ci.desc.Platform.OS +} + +func (ci *containerdImage) MarshalJSON() ([]byte, error) { + b, err := content.ReadBlob(context.Background(), 
ci.containerdCli.ContentStore(), ci.desc) + if err != nil { + return nil, errors.Wrap(err, "unable to read config") + } + return b, nil +} diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index d7d04312b8d59..ab6cd15a45b83 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -4,6 +4,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" // non-contiguous functionality. Please read the comments. import ( + "context" "crypto/sha256" "encoding/hex" "fmt" @@ -18,7 +19,6 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" @@ -112,6 +112,7 @@ func (b *Builder) commitContainer(dispatchState *dispatchState, id string, conta } func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { + logrus.Infof("state.imageID=%s parent.ImageID=%s", state.imageID, parent.ImageID()) newLayer, err := layer.Commit() if err != nil { return err @@ -121,32 +122,19 @@ func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, paren // if there is an error before we can add the full mount with image b.imageSources.Add(newImageMount(nil, newLayer)) - parentImage, ok := parent.(*image.Image) - if !ok { - return errors.Errorf("unexpected image type") - } - - newImage := image.NewChildImage(parentImage, image.ChildConfig{ + config := backend.NewImageConfig{ + ParentImageID: parent.ImageID(), Author: state.maintainer, ContainerConfig: runConfig, - DiffID: newLayer.DiffID(), Config: copyRunConfig(state.runConfig), - }, parentImage.OS) - - // TODO: it seems strange to marshal this here instead of just passing in the - // image struct - config, err := newImage.MarshalJSON() - if err 
!= nil { - return errors.Wrap(err, "failed to encode image config") } - - exportedImage, err := b.docker.CreateImage(config, state.imageID) + exportedImage, err := b.docker.CreateImage(context.Background(), config, newLayer) if err != nil { return errors.Wrapf(err, "failed to export image") } - state.imageID = exportedImage.ImageID() - b.imageSources.Add(newImageMount(exportedImage, newLayer)) + state.imageID = exportedImage.Digest.String() + b.imageSources.Add(newImageMount(newContainerdImage(exportedImage, b.containerdCli, copyRunConfig(state.runConfig)), newLayer)) return nil } diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 0e2bf6a1aca83..af4b0af246eba 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -304,7 +304,7 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e return opts, errors.Wrap(err, "failed to create fscache") } - manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), sm, buildCache, d.IdentityMapping()) + manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), sm, buildCache, d.IdentityMapping(), d.ContainerdClient()) if err != nil { return opts, err } diff --git a/daemon/daemon.go b/daemon/daemon.go index 37e9795e0e8b9..723ece351b81b 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1518,6 +1518,11 @@ func (daemon *Daemon) ImageService() *images.ImageService { return daemon.imageService } +// ContainerdClient returns the containerd client used by the daemon +func (daemon *Daemon) ContainerdClient() *containerd.Client { + return daemon.containerdCli +} + // BuilderBackend returns the backend used by builder func (daemon *Daemon) BuilderBackend() builder.Backend { return struct { diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index 9df0d898dee42..2efc33b9101bd 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -1,10 +1,20 @@ package images // import "github.com/docker/docker/daemon/images" import ( + 
"bytes" "context" + "encoding/json" + "fmt" "io" "runtime" + "strings" + "time" + "github.com/docker/docker/api/types/container" + + "github.com/containerd/containerd/images" + + "github.com/containerd/containerd/content" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -15,7 +25,8 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" - specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -140,7 +151,7 @@ func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLay // TODO: could this use the regular daemon PullImage ? // TODO(containerd): don't return *image.Image type -func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform *specs.Platform) (*image.Image, error) { +func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform *ocispec.Platform) (*image.Image, error) { ref, err := reference.ParseNormalizedNamed(name) if err != nil { return nil, err @@ -210,18 +221,125 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s // CreateImage creates a new image by adding a config and ID to the image store. // This is similar to LoadImage() except that it receives JSON encoded bytes of // an image instead of a tar archive. 
-func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) { +func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImageConfig, newROLayer builder.ROLayer) (ocispec.Descriptor, error) { // TODO(containerd): use containerd's image store - id, err := i.imageStore.Create(config) + + cache, err := i.getCache(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to create image") + return ocispec.Descriptor{}, err + } + + var img struct { + ocispec.Image + + // Overwrite config for custom Docker fields + Container string `json:"container,omitempty"` + ContainerConfig container.Config `json:"container_config,omitempty"` + Config *container.Config `json:"config,omitempty"` + + Comment string `json:"comment,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + // TODO: Overwrite this with a label from config + DockerVersion string `json:"docker_version,omitempty"` } - if parent != "" { - if err := i.imageStore.SetParent(id, image.ID(parent)); err != nil { - return nil, errors.Wrapf(err, "failed to set parent %s", parent) + if newImage.ParentImageID == "" { + img.RootFS.Type = "layers" + } else { + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.Digest(newImage.ParentImageID), + } + + b, err := content.ReadBlob(ctx, i.client.ContentStore(), desc) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "unable to read config") } + + if err := json.Unmarshal(b, &img); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal config") + } + } + created := time.Now().UTC() + img.Created = &created + + isEmptyLayer := layer.IsEmpty(newROLayer.DiffID()) + if !isEmptyLayer { + img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(newROLayer.DiffID())) + } + img.History = append(img.History, ocispec.History{ + Author: newImage.Author, + 
Created: &created, + CreatedBy: strings.Join(newImage.ContainerConfig.Cmd, " "), + EmptyLayer: isEmptyLayer, + }) + img.Author = newImage.Author + img.OS = newImage.OS + img.Config = newImage.Config + img.ContainerConfig = *newImage.ContainerConfig + + store, err := i.getLayerStore(ocispec.Platform{OS: newImage.OS}) + if err != nil { + return ocispec.Descriptor{}, err + } + + config, err := json.Marshal(img) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal committed image") + } + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.FromBytes(config), + Size: int64(len(config)), + } + + newLayer, ok := newROLayer.(*roLayer) + if !ok { + return ocispec.Descriptor{}, errors.Errorf("unexpected image type") + } + + driver := newLayer.layerStore.DriverName() + key := fmt.Sprintf("%s%s", LabelLayerPrefix, driver) + layerID := digest.Digest(newLayer.roLayer.ChainID()) + labels := map[string]string{ + key: layerID.String(), + } + + if newImage.ParentImageID != "" { + labels[LabelImageParent] = newImage.ParentImageID + } + + opts := []content.Opt{content.WithLabels(labels)} + + // write image config data to content store + ref := fmt.Sprintf("config-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, bytes.NewReader(config), desc, opts...); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "unable to store config") + } + + // create a dangling image + _, err = i.client.ImageService().Create(ctx, images.Image{ + Name: desc.Digest.String(), + Target: desc, + CreatedAt: created, + UpdatedAt: created, + }) + if err != nil { + return ocispec.Descriptor{}, errors.Wrapf(err, "failed to create image") + } + + cache.m.Lock() + + if _, ok := cache.layers[driver][layerID]; !ok { + cache.layers[driver][layerID] = newLayer.roLayer + } else { + // Image already retained, don't hold onto layer + defer layer.ReleaseAndLog(store, 
newLayer.roLayer) } + cache.m.Unlock() - return i.imageStore.Get(id) + return desc, nil } diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index 9c71ae2bf757b..bb60644a36a12 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -22,7 +22,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" From 28decab61058731ef4af36f99cf7bc32e68afb7f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 21 Mar 2019 18:07:38 -0700 Subject: [PATCH 50/73] Fix CI linting Signed-off-by: Derek McGowan --- api/types/backend/build.go | 1 + builder/dockerfile/mockbackend_test.go | 5 +++-- daemon/images/service.go | 20 -------------------- daemon/list.go | 18 +++++++++++++++++- 4 files changed, 21 insertions(+), 23 deletions(-) diff --git a/api/types/backend/build.go b/api/types/backend/build.go index a8674ec52cbb2..ea99c422080f3 100644 --- a/api/types/backend/build.go +++ b/api/types/backend/build.go @@ -45,6 +45,7 @@ type GetImageAndLayerOptions struct { Platform *specs.Platform } +// NewImageConfig are options for creating new images type NewImageConfig struct { ParentImageID string Author string diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go index 969c24f214c58..de9d291681c3b 100644 --- a/builder/dockerfile/mockbackend_test.go +++ b/builder/dockerfile/mockbackend_test.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/containerfs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // MockBackend implements the builder.Backend interface for unit testing @@ -81,8 +82,8 @@ func (m *MockBackend) MakeImageCache(cacheFrom 
[]string) builder.ImageCache { return nil } -func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) { - return nil, nil +func (m *MockBackend) CreateImage(ctx context.Context, newImage backend.NewImageConfig, newROLayer builder.ROLayer) (ocispec.Descriptor, error) { + return ocispec.Descriptor{}, nil } type mockImage struct { diff --git a/daemon/images/service.go b/daemon/images/service.go index b0bf3c36ba145..6ed2f5605acea 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -2,12 +2,10 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" - "fmt" "os" "sync" "github.com/containerd/containerd" - "github.com/containerd/containerd/content" "github.com/containerd/containerd/platforms" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" @@ -20,7 +18,6 @@ import ( "github.com/docker/docker/pkg/system" dockerreference "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -180,23 +177,6 @@ func (i *ImageService) CountImages(ctx context.Context) (int, error) { return len(imgs), nil } -// ChildrenByID returns the children image digests for a parent image. 
-// called from list.go to filter containers -func (i *ImageService) ChildrenByID(ctx context.Context, id digest.Digest) ([]digest.Digest, error) { - cs := i.client.ContentStore() - - var children []digest.Digest - err := cs.Walk(ctx, func(info content.Info) error { - children = append(children, info.Digest) - return nil - }, fmt.Sprintf("labels.%q==%s", LabelImageParent, id.String())) - if err != nil { - return nil, err - } - - return children, nil -} - // GetImageBackend returns the storage backend used by the given image // TODO(containerd): return more abstract interface to support snapshotters func (i *ImageService) GetImageBackend(image RuntimeImage) (layer.Store, error) { diff --git a/daemon/list.go b/daemon/list.go index 26e238b8a2cb9..acdb932aede03 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "github.com/containerd/containerd/content" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/container" @@ -241,6 +242,21 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list return newC, nil } +func (daemon *Daemon) childrenByID(ctx context.Context, id digest.Digest) ([]digest.Digest, error) { + cs := daemon.containerdCli.ContentStore() + + var children []digest.Digest + err := cs.Walk(ctx, func(info content.Info) error { + children = append(children, info.Digest) + return nil + }, fmt.Sprintf("labels.%q==%s", images.LabelImageParent, id.String())) + if err != nil { + return nil, err + } + + return children, nil +} + // foldFilter generates the container filter based on the user's filtering options. 
func (daemon *Daemon) foldFilter(ctx context.Context, view container.View, config *types.ContainerListOptions) (*listContext, error) { psFilters := config.Filters @@ -328,7 +344,7 @@ func (daemon *Daemon) foldFilter(ctx context.Context, view container.View, confi return nil } // Then walk down the graph and put the imageIds in imagesFilter - return populateImageFilterByParents(ctx, imagesFilter, img.Digest, daemon.imageService.ChildrenByID) + return populateImageFilterByParents(ctx, imagesFilter, img.Digest, daemon.childrenByID) }) } From 30fddf606a29afcb9d6dab92aabc02a70026c829 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 21 Mar 2019 20:33:29 -0700 Subject: [PATCH 51/73] Revert image package changes The entire package is now deprecated, revert temporary changes. Signed-off-by: Derek McGowan --- image/image.go | 31 ++++++------------------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/image/image.go b/image/image.go index d0bfcec3d2b0e..079ecb813172a 100644 --- a/image/image.go +++ b/image/image.go @@ -12,7 +12,6 @@ import ( "github.com/docker/docker/dockerversion" "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ID is the content-addressable ID of an image. 
@@ -62,36 +61,18 @@ type V1Image struct { // Image stores the image configuration type Image struct { - // DEPRECATED FOR OCI V1Image - - // Config is the descriptor for the image configuration - Config ocispec.Descriptor - - // Image is the image configuration - Image *ocispec.Image - - // References refers to known manifests which reference this image - References []ocispec.Descriptor - - // TODO(containerd): this can be a digest to another config - Parent ID `json:"parent,omitempty"` - - // DEPRECATED: in OCI image - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - - // DEPRECATED: now in config platform - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` // rawJSON caches the immutable JSON associated with this image. - // DEPRECATED: use content store + config digest rawJSON []byte // computedID is the ID computed from the hash of the image config. // Not to be confused with the legacy V1 ID in V1Image. - // DEPRECATED: now config digest computedID ID } @@ -112,7 +93,7 @@ func (img *Image) ImageID() string { // RunConfig returns the image's container config. func (img *Image) RunConfig() *container.Config { - return img.V1Image.Config + return img.Config } // BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. 
From 5b45a83fe8ec9c2f73165a84e6ddb8922f040b6b Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Fri, 22 Mar 2019 00:06:05 -0700 Subject: [PATCH 52/73] add already exists image check during pull Signed-off-by: Anda Xu --- daemon/images/image_pull.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 3c276f939e4e3..453ba9be6b8ff 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -14,6 +14,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + ctrerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" @@ -206,14 +207,13 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference img.Name = c.String() _, err = i.client.ImageService().Create(ctx, img) - if err != nil { + if err != nil && !ctrerrdefs.IsAlreadyExists(err) { return errors.Wrap(err, "failed to save canonical image") } stopProgress() <-progress - - return err + return nil } func (i *ImageService) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor, progressOutput progress.Output, cond *sync.Cond, status map[digest.Digest]bool) error { From 60d7ecaa623c592e54438a23e0bb9362890e9ab0 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 22 Mar 2019 14:43:49 -0700 Subject: [PATCH 53/73] Support updating existing image after pull Fix build Signed-off-by: Derek McGowan --- daemon/images/image.go | 10 ++-------- daemon/images/image_pull.go | 9 +++++++-- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/daemon/images/image.go b/daemon/images/image.go index 15b9a85dcb02d..b24bde4c6c94e 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -289,7 +289,6 @@ func (i *ImageService) getDockerImage(refOrID string) 
(*image.Image, error) { var target ocispec.Descriptor cs := i.client.ContentStore() - references := []ocispec.Descriptor{} namedRef, ok := ref.(reference.Named) if !ok { @@ -325,7 +324,6 @@ func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { return nil, errors.Wrap(err, "unable to resolve image") } target = d - references = append(references, img.Target) } img, err := i.getImage(context.TODO(), target) @@ -335,7 +333,6 @@ func (i *ImageService) getDockerImage(refOrID string) (*image.Image, error) { } return nil, err } - img.References = references return img, nil } @@ -349,13 +346,10 @@ func (i *ImageService) getImage(ctx context.Context, target ocispec.Descriptor) return nil, errors.Wrap(err, "unable to read target blob") } - var img ocispec.Image + var img image.Image if err := json.Unmarshal(b, &img); err != nil { return nil, errors.Wrap(err, "unable to unmarshal image config") } - return &image.Image{ - Config: target, - Image: &img, - }, nil + return &img, nil } diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 453ba9be6b8ff..de0bfebe67f8f 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -207,8 +207,13 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference img.Name = c.String() _, err = i.client.ImageService().Create(ctx, img) - if err != nil && !ctrerrdefs.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to save canonical image") + if err != nil { + if ctrerrdefs.IsAlreadyExists(err) { + _, err = i.client.ImageService().Update(ctx, img) + } + if err != nil { + return errors.Wrap(err, "failed to create image") + } } stopProgress() From 82ceb3c973f93b46618980e37b112ab70823e272 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 22 Mar 2019 15:01:30 -0700 Subject: [PATCH 54/73] Fix swagger nonsense Signed-off-by: Derek McGowan --- api/types/container/container_wait.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff 
--git a/api/types/container/container_wait.go b/api/types/container/container_wait.go index 94b6a20e159b6..7eec9d83e47cc 100644 --- a/api/types/container/container_wait.go +++ b/api/types/container/container_wait.go @@ -7,14 +7,6 @@ package container // import "github.com/docker/docker/api/types/container" // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - // ContainerWaitOKBody OK response to ContainerWait operation // swagger:model ContainerWaitOKBody type ContainerWaitOKBody struct { @@ -27,3 +19,11 @@ type ContainerWaitOKBody struct { // Required: true StatusCode int64 `json:"StatusCode"` } + +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} From 3d4518644cfda94d6aa5ca717bd4d6e6132d5afd Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 22 Mar 2019 16:36:35 -0700 Subject: [PATCH 55/73] Pass test debug flag from environment Signed-off-by: Derek McGowan --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index f59ce723507a2..38559c6b2d21b 100644 --- a/Makefile +++ b/Makefile @@ -55,6 +55,7 @@ DOCKER_ENVS := \ -e TEST_INTEGRATION_DIR \ -e TESTDIRS \ -e TESTFLAGS \ + -e TESTDEBUG \ -e TIMEOUT \ -e VALIDATE_REPO \ -e VALIDATE_BRANCH \ From 516fa02343b4c0e0dbd11c35b866fcecb5db3cf0 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 25 Mar 2019 13:55:30 -0700 Subject: [PATCH 56/73] Add load implementation using containerd import Signed-off-by: Derek McGowan --- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- daemon/images/image_exporter.go | 10 
-- daemon/images/image_load.go | 131 ++++++++++++++++++++++++ 4 files changed, 133 insertions(+), 12 deletions(-) create mode 100644 daemon/images/image_load.go diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 37f3c4c05c0a7..71866b0651729 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -29,7 +29,7 @@ type imageBackend interface { } type importExportBackend interface { - LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error ImportImage(ctx context.Context, src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error ExportImage(names []string, outStream io.Writer) error } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index a3caa21268489..d958721f2220d 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -176,7 +176,7 @@ func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, output := ioutils.NewWriteFlusher(w) defer output.Close() - if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + if err := s.backend.LoadImage(ctx, r.Body, output, quiet); err != nil { output.Write(streamformatter.FormatError(err)) } return nil diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 390fad09ebf2a..6fb0931d3d9d6 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -18,13 +18,3 @@ func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { //return imageExporter.Save(names, outStream) return errdefs.ErrNotImplemented } - -// LoadImage uploads a set of images into the repository. This is the -// complement of ImageExport. The input stream is an uncompressed tar -// ball containing images and metadata. 
-func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - // TODO(containerd): use containerd's archive importer - //imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) - //return imageExporter.Load(inTar, outStream, quiet) - return errdefs.ErrNotImplemented -} diff --git a/daemon/images/image_load.go b/daemon/images/image_load.go new file mode 100644 index 0000000000000..e37996c0e6d6c --- /dev/null +++ b/daemon/images/image_load.go @@ -0,0 +1,131 @@ +package images // import "github.com/docker/docker/daemon/images" +import ( + "context" + "encoding/json" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/log" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var p progress.Output + if !quiet { + p = streamformatter.NewJSONProgressOutput(outStream, false) + } + + ctx, done, err := i.client.WithLease(ctx) + if err != nil { + return err + } + defer func() { + if err := done(context.Background()); err != nil { + log.G(ctx).WithError(err).Errorf("lease release failed") + } + }() + + cs := i.client.ContentStore() + index, err := archive.ImportIndex(ctx, cs, inTar) + if err != nil { + // TODO(containerd): Handle unrecognized type for older + // docker images. 
Update import index to return an error + // which has all the blobs written + return err + } + + var ( + imgs []images.Image + is = i.client.ImageService() + ) + + // TODO(containerd): Provide option for naming OCI index + //imgs = append(imgs, images.Image{ + // Name: iopts.indexName, + // Target: index, + //}) + + var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + // Only save images at top level + if desc.Digest != index.Digest { + return images.Children(ctx, cs, desc) + } + + b, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + return nil, err + } + + var idx ocispec.Index + if err := json.Unmarshal(b, &idx); err != nil { + return nil, err + } + + for _, m := range idx.Manifests { + ref := m.Annotations[ocispec.AnnotationRefName] + if ref == "" { + log.G(ctx).Debugf("image skipped, no name for %s", m.Digest.String()) + continue + } + + if p != nil { + progress.Message(p, ref, "Importing") + } + + mfst, err := images.Manifest(ctx, cs, m, i.platforms) + if err != nil { + return nil, err + } + + if err := i.unpack(ctx, mfst.Config, mfst.Layers, p, nil, nil); err != nil { + return nil, errors.Wrap(err, "failed to unpack image") + } + + imgID := m.Digest.String() + imgs = append(imgs, images.Image{ + Name: ref, + Target: m, + }, images.Image{ + Name: ref + "@" + imgID, + Target: m, + }) + + imgs = append(imgs) + + i.LogImageEvent(ctx, imgID, imgID, "load") + } + + return idx.Manifests, nil + } + + handler = images.SetChildrenLabels(cs, handler) + if err := images.Walk(ctx, handler, index); err != nil { + return err + } + + for i := range imgs { + img, err := is.Update(ctx, imgs[i], "target") + if err != nil { + if !errdefs.IsNotFound(err) { + return err + } + + img, err = is.Create(ctx, imgs[i]) + if err != nil { + return err + } + } + imgs[i] = img + } + + return nil +} From 42ed7327a380eac375603d70a46686eaf7c7898e Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 25 Mar 2019 14:54:10 
-0700 Subject: [PATCH 57/73] Update make emptyfs to use supported load format Signed-off-by: Derek McGowan --- hack/make/.ensure-emptyfs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hack/make/.ensure-emptyfs b/hack/make/.ensure-emptyfs index 898cc22834d33..8dac8d2e913f5 100644 --- a/hack/make/.ensure-emptyfs +++ b/hack/make/.ensure-emptyfs @@ -6,18 +6,18 @@ if ! docker image inspect emptyfs > /dev/null; then # see https://github.com/docker/docker/pull/5262 # and also https://github.com/docker/docker/issues/4242 dir="$DEST/emptyfs" - uuid=511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 - mkdir -p "$dir/$uuid" + layerid=269cb3feb538d91d2c38c78063d287bdc9a11098e4266d141087d6c61f4f42ec + mkdir -p "$dir/$layerid" ( - echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > "$dir/repositories" - cd "$dir/$uuid" - echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json - echo '1.0' > VERSION - tar -cf layer.tar --files-from /dev/null + config="11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + echo "[{\"Config\":\"$config.json\",\"RepoTags\":[\"emptyfs:latest\"],\"Layers\":[\"$layerid/layer.tar\"]}]" > "$dir/manifest.json" + echo '{"architecture":"x86_64","comment":"Imported from 
-","container_config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2013-06-13T14:03:50.821769-07:00","docker_version":"0.4.0","history":[{"created":"2013-06-13T14:03:50.821769-07:00","comment":"Imported from -"}],"rootfs":{"type":"layers","diff_ids":["sha256:84ff92691f909a05b224e1c56abb4864f01b4f8e3c854e4bb4c7baf1d3f6d652"]}}' > "$dir/$config.json" + tar -cf "$dir/$layerid/layer.tar" --files-from /dev/null ) ( [ -n "$TESTDEBUG" ] && set -x tar -cC "$dir" . | docker load + docker images ) rm -rf "$dir" fi From 943702348d4724999e97e7c48dbc0797d74fc64b Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 27 Mar 2019 10:27:39 -0700 Subject: [PATCH 58/73] Update output to match integration tests Signed-off-by: Derek McGowan --- daemon/images/image_pull.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index de0bfebe67f8f..46833293d7bfd 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -74,12 +74,12 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) ongoing := newJobs(ref.Name()) pctx, stopProgress := context.WithCancel(ctx) - progress := make(chan struct{}) + progressC := make(chan struct{}) go func() { // no progress bar, because it hides some debug logs showProgress(pctx, ongoing, ref, i.client.ContentStore(), progressOutput) - close(progress) + close(progressC) }() h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { @@ -200,24 +200,29 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference } } + fref := 
reference.FamiliarString(ref) c, err := reference.WithDigest(ref, img.Target.Digest) if err != nil { return errors.Wrap(err, "failed to create digest ref") } + var newImage bool img.Name = c.String() _, err = i.client.ImageService().Create(ctx, img) if err != nil { - if ctrerrdefs.IsAlreadyExists(err) { - _, err = i.client.ImageService().Update(ctx, img) - } - if err != nil { + if !ctrerrdefs.IsAlreadyExists(err) { return errors.Wrap(err, "failed to create image") } + } else { + newImage = true } stopProgress() - <-progress + <-progressC + progress.Messagef(progressOutput, "", "Digest: %s", img.Target.Digest.String()) + if newImage { + progress.Messagef(progressOutput, "", "Downloaded newer image for %s", fref) + } return nil } From 952075550a9411a862db0183432b4b3e5fda9064 Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Thu, 21 Mar 2019 13:07:06 -0700 Subject: [PATCH 59/73] image: restore(): prevent panic This is to avoid nil pointer dereference. Signed-off-by: Kir Kolyshkin --- image/store.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/image/store.go b/image/store.go index 1a8a8a24516c2..de0f1546c0bd6 100644 --- a/image/store.go +++ b/image/store.go @@ -79,7 +79,12 @@ func (is *store) restore() error { logrus.Errorf("not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem()) return nil } - l, err = is.lss[img.OperatingSystem()].Get(chainID) + lss, exists := is.lss[img.OperatingSystem()] + if !exists { + // TODO warning? + return nil + } + l, err = lss.Get(chainID) if err != nil { if err == layer.ErrLayerDoesNotExist { logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem()) From 1285d07a1e3bcbbdd92972daa745164de598c71f Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Thu, 21 Mar 2019 13:07:50 -0700 Subject: [PATCH 60/73] Preliminary image store migration support This migrates image store to containerd. 
To test: # sudo DOCKER_MIGRATE_IMAGE_STORE=da ./bundles/dynbinary-daemon/dockerd-dev To re-test: # ctr -n moby image ls -q | xargs ctr -n moby image rm # ctr -n moby content ls -q | xargs ctr -n moby content rm TODO: - find a better place for this code - maybe some tests? Signed-off-by: Kir Kolyshkin --- daemon/daemon.go | 87 ++++++++++++++++++++++++++++++++++++++++++++++ image/fs.go | 43 +++++++++++++++++++++++ reference/store.go | 27 +++++++++++++- 3 files changed, 156 insertions(+), 1 deletion(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 723ece351b81b..5cddfa5d54dc5 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -7,6 +7,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( "context" + "encoding/json" "fmt" "io/ioutil" "math/rand" @@ -24,6 +25,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/defaults" + containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/platforms" @@ -1016,6 +1018,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, fmt.Errorf("Couldn't create reference store repository: %s", err) } + if os.Getenv("DOCKER_MIGRATE_IMAGE_STORE") != "" { + if err := d.Migrate(ctx, ifs, rs); err != nil { + return nil, err + } + os.Exit(0) + } + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) if err != nil { return nil, err @@ -1530,3 +1539,81 @@ func (daemon *Daemon) BuilderBackend() builder.Backend { *images.ImageService }{daemon, daemon.imageService} } + +func (d *Daemon) Migrate(ctx context.Context, ifs image.StoreBackend, rs refstore.WalkableStore) error { + if d.containerdCli == nil { + return errors.New("unable to migrate without containerd") + } + + ctx, done, err := d.containerdCli.WithLease(ctx) + if err != nil { + return err + } + + defer func() { + 
if err := done(context.Background()); err != nil { + logrus.WithError(err).Error("failed to remove lease") + } + }() + + if err := image.MigrateImageStore(ctx, ifs, d.containerdCli.ContentStore(), images.LabelImageParent); err != nil { + return err + } + + print("Migrating references ") + numRef := 0 + rs.Walk(func(ref reference.Named) error { + id, err := rs.Get(ref) + if err != nil { + logrus.WithError(err).Warnf("can't get digest for %s", id) + return nil + } + config, err := ifs.Get(id) + if err != nil { + logrus.WithError(err).Warnf("can't get config for %s", id) + return nil + } + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: id, + Size: int64(len(config)), + } + + // find out created time + var img image.Image + if err := json.Unmarshal(config, &img); err != nil { + logrus.WithError(err).Warn("can't parse image") + return nil + } + created := img.Created + // find out updated time + updated := created + if updatedStr, err := ifs.GetMetadata(id, "lastUpdated"); err == nil { + updated, err = time.Parse(time.RFC3339Nano, string(updatedStr)) + if err != nil { + logrus.WithError(err).Warn("can't parse lastUpdated time %q for %s", string(updatedStr), id) + updated = created + } + } + _, err = d.containerdCli.ImageService().Create(ctx, containerdimages.Image{ + Name: ref.String(), + Target: desc, + CreatedAt: created, + UpdatedAt: updated, + Labels: map[string]string{}, // TODO any labels here? 
+ }) + if err != nil { + logrus.WithError(err).Warn("can't create image") + return nil + } + print(".") + // TODO + // rs.Delete(ref) + + numRef++ + return nil + }) + println(" done,", numRef, "references") + return nil +} diff --git a/image/fs.go b/image/fs.go index 7080c8c0155f6..e54d3f6e25b5d 100644 --- a/image/fs.go +++ b/image/fs.go @@ -1,14 +1,18 @@ package image // import "github.com/docker/docker/image" import ( + "bytes" + "context" "fmt" "io/ioutil" "os" "path/filepath" "sync" + "github.com/containerd/containerd/content" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -173,3 +177,42 @@ func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) } + +func MigrateImageStore(ctx context.Context, from StoreBackend, to content.Store, labelParent string) error { + print(`Migrating image store `) + num := 0 + from.Walk(func(id digest.Digest) error { + contents, err := from.Get(id) + if err != nil { + logrus.WithError(err).Errorf("can't get %s", id) + return nil + } + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: id, + Size: int64(len(contents)), + } + + labels := map[string]string{} + parent, err := from.GetMetadata(id, "parent") + if err == nil { + labels[labelParent] = string(parent) + } + opts := []content.Opt{content.WithLabels(labels)} + + ref := "config-" + id.Algorithm().String() + "-" + id.Encoded() + if err := content.WriteBlob(ctx, to, ref, bytes.NewReader(contents), desc, opts...); err != nil { + logrus.WithError(err).Errorf("can't store config") + } else { + num++ + // TODO + //from.Delete(id) + } + + print(`.`) + return nil + }) + println(" done,", num, "objects") + return nil +} diff --git a/reference/store.go b/reference/store.go index b942c42ca2c7f..caf72d3295ee5 100644 --- a/reference/store.go 
+++ b/reference/store.go @@ -36,6 +36,14 @@ type Store interface { Get(ref reference.Named) (digest.Digest, error) } +// RefWalkFunc is a callback function type used by WalkableStore.Walk +type RefWalkFunc func(a reference.Named) error + +type WalkableStore interface { + Store + Walk(f RefWalkFunc) error +} + type store struct { mu sync.RWMutex // jsonPath is the path to the file where the serialized tag data is @@ -70,7 +78,7 @@ func (a lexicalAssociations) Less(i, j int) bool { // NewReferenceStore creates a new reference store, tied to a file path where // the set of references are serialized in JSON format. -func NewReferenceStore(jsonPath string) (Store, error) { +func NewReferenceStore(jsonPath string) (WalkableStore, error) { abspath, err := filepath.Abs(jsonPath) if err != nil { return nil, err @@ -346,3 +354,20 @@ func (store *store) reload() error { return nil } + +func (store *store) Walk(f RefWalkFunc) error { + for _, repo := range store.Repositories { + for refStr := range repo { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + continue + } + if err := f(ref); err != nil { + return err + } + } + } + + return nil +} From ac4489bd46ed29378b8f4afd2cdbf4dc6f3bcfef Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Thu, 4 Apr 2019 14:10:19 -0700 Subject: [PATCH 61/73] clean up code and add comments to image_builder Signed-off-by: Anda Xu --- builder/dockerfile/internals.go | 1 - daemon/images/image_builder.go | 19 +++++++------------ 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index ab6cd15a45b83..6a13f48f260f3 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -112,7 +112,6 @@ func (b *Builder) commitContainer(dispatchState *dispatchState, id string, conta } func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { - 
logrus.Infof("state.imageID=%s parent.ImageID=%s", state.imageID, parent.ImageID()) newLayer, err := layer.Commit() if err != nil { return err diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index 2efc33b9101bd..a9383380d7801 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -10,14 +10,12 @@ import ( "strings" "time" - "github.com/docker/docker/api/types/container" - - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" "github.com/docker/docker/image" "github.com/docker/docker/layer" @@ -228,7 +226,8 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag if err != nil { return ocispec.Descriptor{}, err } - + // creates a intermediate image that reads parent image info + // and then merge with the new image config var img struct { ocispec.Image @@ -262,6 +261,8 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal config") } } + + // merge with new image config created := time.Now().UTC() img.Created = &created @@ -280,11 +281,6 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag img.Config = newImage.Config img.ContainerConfig = *newImage.ContainerConfig - store, err := i.getLayerStore(ocispec.Platform{OS: newImage.OS}) - if err != nil { - return ocispec.Descriptor{}, err - } - config, err := json.Marshal(img) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal committed image") @@ -332,12 +328,11 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag } cache.m.Lock() - if _, ok := 
cache.layers[driver][layerID]; !ok { cache.layers[driver][layerID] = newLayer.roLayer } else { // Image already retained, don't hold onto layer - defer layer.ReleaseAndLog(store, newLayer.roLayer) + defer layer.ReleaseAndLog(newLayer.layerStore, newLayer.roLayer) } cache.m.Unlock() From 404133b766e44095462ba23ff2f043922e91d1df Mon Sep 17 00:00:00 2001 From: Anda Xu Date: Thu, 4 Apr 2019 14:10:52 -0700 Subject: [PATCH 62/73] resolve runtime image properly for both builder and end user fix already exists error during image tagging Signed-off-by: Anda Xu --- api/types/configs.go | 2 + builder/dockerfile/containerbackend.go | 11 ++++++ builder/dockerfile/imagecontext.go | 2 +- builder/dockerfile/internals.go | 2 +- daemon/create.go | 20 ++++++++-- daemon/images/image.go | 9 +---- daemon/images/image_builder.go | 52 +++++++++++++++++--------- daemon/images/image_tag.go | 8 +++- 8 files changed, 75 insertions(+), 31 deletions(-) diff --git a/api/types/configs.go b/api/types/configs.go index c75abd743084d..124db3535165e 100644 --- a/api/types/configs.go +++ b/api/types/configs.go @@ -3,6 +3,7 @@ package types // import "github.com/docker/docker/api/types" import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // configs holds structs used for internal communication between the @@ -13,6 +14,7 @@ import ( type ContainerCreateConfig struct { Name string // TODO(containerd): Add Platform (OS, Architecture, Variant) + Descriptor *ocispec.Descriptor Config *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go index b65c6222e570d..049e4bf19a201 100644 --- a/builder/dockerfile/containerbackend.go +++ b/builder/dockerfile/containerbackend.go @@ -10,6 +10,8 @@ import ( "github.com/docker/docker/builder" containerpkg 
"github.com/docker/docker/container" "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -29,7 +31,16 @@ func newContainerManager(docker builder.ExecBackend) *containerManager { // Create a container func (c *containerManager) Create(ctx context.Context, runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { + // note that all callers calling this function should + // only intend to run an intermediate container during + // the build process so that we can safely make the + // assumption of MediaTypeImageConfig type + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: digest.Digest(runConfig.Image), + } container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, types.ContainerCreateConfig{ + Descriptor: &desc, Config: runConfig, HostConfig: hostConfig, }) diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go index ea7801a20f4c6..5fb68cb231605 100644 --- a/builder/dockerfile/imagecontext.go +++ b/builder/dockerfile/imagecontext.go @@ -130,7 +130,7 @@ type containerdImage struct { config *container.Config } -func newContainerdImage(desc ocispec.Descriptor, client *containerd.Client, config *container.Config) *containerdImage { +func NewContainerdImage(desc ocispec.Descriptor, client *containerd.Client, config *container.Config) *containerdImage { return &containerdImage{desc: desc, containerdCli: client, config: config} } diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 6a13f48f260f3..de25ef27c2d12 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -133,7 +133,7 @@ func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, paren } state.imageID = exportedImage.Digest.String() - 
b.imageSources.Add(newImageMount(newContainerdImage(exportedImage, b.containerdCli, copyRunConfig(state.runConfig)), newLayer)) + b.imageSources.Add(newImageMount(NewContainerdImage(exportedImage, b.containerdCli, copyRunConfig(state.runConfig)), newLayer)) return nil } diff --git a/daemon/create.go b/daemon/create.go index 294a9d967e083..66d588537a832 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -61,14 +61,26 @@ func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context } func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (containertypes.ContainerCreateCreatedBody, error) { - start := time.Now() + var ( + err error + start = time.Now() + ) + if opts.params.Config == nil { return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) } - if opts.params.Config.Image != "" { - var err error - opts.rImage, err = daemon.imageService.ResolveRuntimeImage(ctx, opts.params.Config.Image) + if opts.params.Descriptor != nil { + opts.rImage, err = daemon.imageService.ResolveRuntimeImage(ctx, *opts.params.Descriptor) + if err != nil { + return containertypes.ContainerCreateCreatedBody{}, errors.Wrapf(err, "no runtime image found") + } + } else if opts.params.Config.Image != "" { + desc, err := daemon.imageService.ResolveImage(ctx, opts.params.Config.Image) + if err != nil { + return containertypes.ContainerCreateCreatedBody{}, errors.Wrapf(err, "failed to resolve image %s", opts.params.Config.Image) + } + opts.rImage, err = daemon.imageService.ResolveRuntimeImage(ctx, desc) if err != nil { return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(err) } diff --git a/daemon/images/image.go b/daemon/images/image.go index b24bde4c6c94e..9328d85905d54 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -15,7 +15,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/errdefs" 
"github.com/docker/docker/image" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -134,12 +134,7 @@ type RuntimeImage struct { // A runtime image is platform specific. // The platform is resolved based on availability in the image and // the order preference of the backend storage drivers. -func (i *ImageService) ResolveRuntimeImage(ctx context.Context, refOrID string) (RuntimeImage, error) { - desc, err := i.ResolveImage(ctx, refOrID) - if err != nil { - return RuntimeImage{}, err - } - +func (i *ImageService) ResolveRuntimeImage(ctx context.Context, desc ocispec.Descriptor) (RuntimeImage, error) { runtimeImages, err := i.runtimeImages(ctx, desc) if err != nil { return RuntimeImage{}, err diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index a9383380d7801..9c85e4f2a5ea7 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -10,6 +10,8 @@ import ( "strings" "time" + "github.com/opencontainers/image-spec/identity" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" @@ -17,6 +19,7 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/containerfs" @@ -134,15 +137,15 @@ func (l *rwLayer) Release() error { return nil } -func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLayer, error) { - if img == nil || img.RootFS.ChainID() == "" { +func newROLayerForImage(chainID layer.ChainID, layerStore layer.Store) (builder.ROLayer, error) { + if chainID == "" { return &roLayer{layerStore: layerStore}, nil } // Hold a reference to the image layer so that it can't be removed 
before // it is released - layer, err := layerStore.Get(img.RootFS.ChainID()) + layer, err := layerStore.Get(chainID) if err != nil { - return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) + return nil, err } return &roLayer{layerStore: layerStore, roLayer: layer}, nil } @@ -186,22 +189,39 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s if !system.IsOSSupported(os) { return nil, nil, system.ErrNotSupportedOperatingSystem } - layer, err := newROLayerForImage(nil, i.layerStores[os]) + layer, err := newROLayerForImage("", i.layerStores[os]) return nil, layer, err } if opts.PullOption != backend.PullOptionForcePull { - image, err := i.getDockerImage(refOrID) - if err != nil && opts.PullOption == backend.PullOptionNoPull { - return nil, nil, err + desc, err := i.ResolveImage(ctx, refOrID) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to resolve image %s", refOrID) } - // TODO: shouldn't we error out if error is different from "not found" ? 
- if image != nil { - if !system.IsOSSupported(image.OperatingSystem()) { - return nil, nil, system.ErrNotSupportedOperatingSystem + rImage, err := i.ResolveRuntimeImage(ctx, desc) + if err != nil { + if opts.PullOption == backend.PullOptionNoPull { + return nil, nil, err + } + } else { + var img struct { + ocispec.Image + Config *container.Config `json:"config,omitempty"` } - layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) - return image, layer, err + + if err := json.Unmarshal(rImage.ConfigBytes, &img); err != nil { + return nil, nil, errors.Wrap(err, "failed to unmarshal config") + } + ci := dockerfile.NewContainerdImage(rImage.Config, i.client, img.Config) + store, err := i.getLayerStore(rImage.Platform) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to get layer store") + } + layer, err := newROLayerForImage(layer.ChainID(identity.ChainID(img.RootFS.DiffIDs)), store) + if err != nil { + err = errors.Wrapf(err, "failed to get layer for image %s", refOrID) + } + return ci, layer, err } } @@ -212,7 +232,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s if !system.IsOSSupported(image.OperatingSystem()) { return nil, nil, system.ErrNotSupportedOperatingSystem } - layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) + layer, err := newROLayerForImage(image.RootFS.ChainID(), i.layerStores[image.OperatingSystem()]) return image, layer, err } @@ -220,8 +240,6 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s // This is similar to LoadImage() except that it receives JSON encoded bytes of // an image instead of a tar archive. 
func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImageConfig, newROLayer builder.ROLayer) (ocispec.Descriptor, error) { - // TODO(containerd): use containerd's image store - cache, err := i.getCache(ctx) if err != nil { return ocispec.Descriptor{}, err diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index c78deee08f816..1fbc57586cc5c 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -3,6 +3,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -41,7 +42,12 @@ func (i *ImageService) TagImageWithReference(ctx context.Context, target ocispec is := i.client.ImageService() _, err := is.Create(ctx, im) if err != nil { - return errors.Wrap(err, "failed to create image") + if errdefs.IsAlreadyExists(err) { + _, err = i.client.ImageService().Update(ctx, im) + } + if err != nil { + return errors.Wrap(err, "failed to create image") + } } i.LogImageEvent(ctx, target.Digest.String(), reference.FamiliarString(newTag), "tag") From d74da29df21b6b543da3cd367049db76adf68989 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 5 Apr 2019 16:41:15 -0700 Subject: [PATCH 63/73] Move migration to separate function Remove image store instantiation from image service Signed-off-by: Derek McGowan --- builder/builder-next/builder.go | 4 +- cmd/dockerd/daemon.go | 6 +- daemon/daemon.go | 153 ++------------------- daemon/images/cache.go | 29 ++-- daemon/images/image_import.go | 199 +++++++++++++-------------- daemon/images/image_prune.go | 230 ++++++++++++++++---------------- daemon/images/images.go | 140 +++++++++---------- daemon/images/service.go | 70 +++------- daemon/migration.go | 149 +++++++++++++++++++++ 9 files changed, 477 insertions(+), 503 deletions(-) create mode 100644 
daemon/migration.go diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go index bb701106f637a..091f6caac1d6b 100644 --- a/builder/builder-next/builder.go +++ b/builder/builder-next/builder.go @@ -15,8 +15,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" + "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" - "github.com/docker/docker/daemon/images" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/system" @@ -68,7 +68,7 @@ var cacheFields = map[string]bool{ type Opt struct { SessionManager *session.Manager Root string - Dist images.DistributionServices + Dist daemon.DistributionServices NetworkController libnetwork.NetworkController DefaultCgroupParent string ResolverOpt resolver.ResolveOptionsFunc diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index af4b0af246eba..cc47ad7893db2 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -309,10 +309,14 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e return opts, err } cgroupParent := newCgroupParent(config) + ds, err := d.DistributionServices() + if err != nil { + return opts, errors.Wrap(err, "failed to get distribution services") + } bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), - Dist: d.DistributionServices(), + Dist: ds, NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, ResolverOpt: d.NewResolveOptionsFunc(), diff --git a/daemon/daemon.go b/daemon/daemon.go index 5cddfa5d54dc5..a236544f331df 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -7,7 +7,6 @@ package daemon // import "github.com/docker/docker/daemon" import ( "context" - "encoding/json" "fmt" "io/ioutil" "math/rand" @@ -25,7 +24,6 @@ import ( "github.com/containerd/containerd" 
"github.com/containerd/containerd/defaults" - containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/platforms" @@ -52,9 +50,7 @@ import ( // register graph drivers _ "github.com/docker/docker/daemon/graphdriver/register" "github.com/docker/docker/daemon/stats" - dmetadata "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" libcontainerdtypes "github.com/docker/docker/libcontainerd/types" @@ -66,7 +62,6 @@ import ( "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/plugin" pluginexec "github.com/docker/docker/plugin/executor/containerd" - refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" volumesservice "github.com/docker/docker/volume/service" @@ -944,8 +939,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // TODO(containerd): probe system for additional configured graph drivers } - layerStores := make(map[string]layer.Store) - var backends []images.LayerBackend d.graphDrivers = make(map[string]string) for _, driver := range storageDrivers { @@ -977,21 +970,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS]) - ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) - if err != nil { - return nil, err - } - - lgrMap := make(map[string]image.LayerGetReleaser) - for os, ls := range layerStores { - lgrMap[os] = ls - } - imageStore, err := image.NewImageStore(ifs, lgrMap) - if err != nil { - return nil, err - } - d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d) if err != nil { return nil, 
err @@ -1002,32 +980,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - // We have a single tag/reference store for the daemon globally. However, it's - // stored under the graphdriver. On host platforms which only support a single - // container OS, but multiple selectable graphdrivers, this means depending on which - // graphdriver is chosen, the global reference store is under there. For - // platforms which support multiple container operating systems, this is slightly - // more problematic as where does the global ref store get located? Fortunately, - // for Windows, which is currently the only daemon supporting multiple container - // operating systems, the list of graphdrivers available isn't user configurable. - // For backwards compatibility, we just put it under the windowsfilter - // directory regardless. - refStoreLocation := filepath.Join(imageRoot, `repositories.json`) - rs, err := refstore.NewReferenceStore(refStoreLocation) - if err != nil { - return nil, fmt.Errorf("Couldn't create reference store repository: %s", err) - } - + // TODO(containerd): Check migration from on disk state + // TODO(containerd): Perform migration after image service creation if os.Getenv("DOCKER_MIGRATE_IMAGE_STORE") != "" { - if err := d.Migrate(ctx, ifs, rs); err != nil { + // TODO(containerd): Pass in preferred driver name for OS + if err := d.Migrate(ctx, config.Root); err != nil { return nil, err } - os.Exit(0) - } - - distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) - if err != nil { - return nil, err } // Discovery is only enabled when the daemon is launched with an address to advertise. When @@ -1065,18 +1024,15 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // used above to run migration. They could be initialized in ImageService // if migration is called from daemon/images. layerStore might move as well. 
d.imageService = images.NewImageService(images.ImageServiceConfig{ - DefaultNamespace: ContainersNamespace, - DefaultPlatform: storageDrivers[0].platform, - Client: d.containerdCli, - ContainerStore: d.containers, - DistributionMetadataStore: distributionMetadataStore, - EventsService: d.EventsService, - ImageStore: imageStore, - LayerBackends: backends, - MaxConcurrentDownloads: *config.MaxConcurrentDownloads, - MaxConcurrentUploads: *config.MaxConcurrentUploads, - ReferenceStore: rs, - RegistryService: registryService, + DefaultNamespace: ContainersNamespace, + DefaultPlatform: storageDrivers[0].platform, + Client: d.containerdCli, + ContainerStore: d.containers, + EventsService: d.EventsService, + LayerBackends: backends, + MaxConcurrentDownloads: *config.MaxConcurrentDownloads, + MaxConcurrentUploads: *config.MaxConcurrentUploads, + RegistryService: registryService, }) // TODO(containerd): create earlier, background, and wait at end @@ -1134,11 +1090,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return d, nil } -// DistributionServices returns services controlling daemon storage -func (daemon *Daemon) DistributionServices() images.DistributionServices { - return daemon.imageService.DistributionServices() -} - func (daemon *Daemon) waitForStartupDone() { <-daemon.startupDone } @@ -1539,81 +1490,3 @@ func (daemon *Daemon) BuilderBackend() builder.Backend { *images.ImageService }{daemon, daemon.imageService} } - -func (d *Daemon) Migrate(ctx context.Context, ifs image.StoreBackend, rs refstore.WalkableStore) error { - if d.containerdCli == nil { - return errors.New("unable to migrate without containerd") - } - - ctx, done, err := d.containerdCli.WithLease(ctx) - if err != nil { - return err - } - - defer func() { - if err := done(context.Background()); err != nil { - logrus.WithError(err).Error("failed to remove lease") - } - }() - - if err := image.MigrateImageStore(ctx, ifs, d.containerdCli.ContentStore(), 
images.LabelImageParent); err != nil { - return err - } - - print("Migrating references ") - numRef := 0 - rs.Walk(func(ref reference.Named) error { - id, err := rs.Get(ref) - if err != nil { - logrus.WithError(err).Warnf("can't get digest for %s", id) - return nil - } - config, err := ifs.Get(id) - if err != nil { - logrus.WithError(err).Warnf("can't get config for %s", id) - return nil - } - - desc := ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: id, - Size: int64(len(config)), - } - - // find out created time - var img image.Image - if err := json.Unmarshal(config, &img); err != nil { - logrus.WithError(err).Warn("can't parse image") - return nil - } - created := img.Created - // find out updated time - updated := created - if updatedStr, err := ifs.GetMetadata(id, "lastUpdated"); err == nil { - updated, err = time.Parse(time.RFC3339Nano, string(updatedStr)) - if err != nil { - logrus.WithError(err).Warn("can't parse lastUpdated time %q for %s", string(updatedStr), id) - updated = created - } - } - _, err = d.containerdCli.ImageService().Create(ctx, containerdimages.Image{ - Name: ref.String(), - Target: desc, - CreatedAt: created, - UpdatedAt: updated, - Labels: map[string]string{}, // TODO any labels here? 
- }) - if err != nil { - logrus.WithError(err).Warn("can't create image") - return nil - } - print(".") - // TODO - // rs.Delete(ref) - - numRef++ - return nil - }) - println(" done,", numRef, "references") - return nil -} diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 03347afc450c0..5c23cd1af319d 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -9,10 +9,8 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/docker/docker/builder" - buildcache "github.com/docker/docker/image/cache" "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) type cache struct { @@ -97,20 +95,23 @@ func (i *ImageService) getCache(ctx context.Context) (c *cache, err error) { // MakeImageCache creates a stateful image cache for build. func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { - if len(sourceRefs) == 0 { - return buildcache.NewLocal(i.imageStore) - } + return nil + /* + if len(sourceRefs) == 0 { + return buildcache.NewLocal(i.imageStore) + } - cache := buildcache.New(i.imageStore) + cache := buildcache.New(i.imageStore) - for _, ref := range sourceRefs { - img, err := i.getDockerImage(ref) - if err != nil { - logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) - continue + for _, ref := range sourceRefs { + img, err := i.getDockerImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.Populate(img) } - cache.Populate(img) - } - return cache + return cache + */ } diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index 88aa4f3c94f30..3b7b2383b015a 100644 --- a/daemon/images/image_import.go +++ b/daemon/images/image_import.go @@ -2,25 +2,9 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" - "encoding/json" "io" - "net/http" - "net/url" - 
"runtime" - "strings" - "time" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/builder/remotecontext" - "github.com/docker/docker/dockerversion" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" "github.com/pkg/errors" ) @@ -29,113 +13,116 @@ import ( // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. func (i *ImageService) ImportImage(ctx context.Context, src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { - var ( - rc io.ReadCloser - resp *http.Response - newRef reference.Named - ) + return errdefs.NotImplemented(errors.New("import image not implemented")) + /* + var ( + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) - // Default the operating system if not supplied. - if os == "" { - os = runtime.GOOS - } - - if repository != "" { - var err error - newRef, err = reference.ParseNormalizedNamed(repository) - if err != nil { - return errdefs.InvalidParameter(err) + // Default the operating system if not supplied. 
+ if os == "" { + os = runtime.GOOS } - if _, isCanonical := newRef.(reference.Canonical); isCanonical { - return errdefs.InvalidParameter(errors.New("cannot import digest reference")) + + if repository != "" { + var err error + newRef, err = reference.ParseNormalizedNamed(repository) + if err != nil { + return errdefs.InvalidParameter(err) + } + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errdefs.InvalidParameter(errors.New("cannot import digest reference")) + } + + if tag != "" { + //set newRef + _, err = reference.WithTag(newRef, tag) + if err != nil { + return errdefs.InvalidParameter(err) + } + } } - if tag != "" { - //set newRef - _, err = reference.WithTag(newRef, tag) + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, os) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + if len(strings.Split(src, "://")) == 1 { + src = "http://" + src + } + u, err := url.Parse(src) if err != nil { return errdefs.InvalidParameter(err) } + + resp, err = remotecontext.GetWithStatusError(u.String()) + if err != nil { + return err + } + outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") } - } - config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, os) - if err != nil { - return err - } - if src == "-" { - rc = inConfig - } else { - inConfig.Close() - if len(strings.Split(src, "://")) == 1 { - src = "http://" + src + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src } - u, err := url.Parse(src) + + inflatedLayerData, err := archive.DecompressStream(rc) if err != nil { - return errdefs.InvalidParameter(err) + return err } - - resp, err = remotecontext.GetWithStatusError(u.String()) + l, err := i.layerStores[os].Register(inflatedLayerData, "") if 
err != nil { return err } - outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) - progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) - rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") - } + defer layer.ReleaseAndLog(i.layerStores[os], l) - defer rc.Close() - if len(msg) == 0 { - msg = "Imported from " + src - } - - inflatedLayerData, err := archive.DecompressStream(rc) - if err != nil { - return err - } - l, err := i.layerStores[os].Register(inflatedLayerData, "") - if err != nil { - return err - } - defer layer.ReleaseAndLog(i.layerStores[os], l) - - created := time.Now().UTC() - imgConfig, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: config, - Architecture: runtime.GOARCH, - OS: os, - Created: created, - Comment: msg, - }, - RootFS: &image.RootFS{ - Type: "layers", - DiffIDs: []layer.DiffID{l.DiffID()}, - }, - History: []image.History{{ - Created: created, - Comment: msg, - }}, - }) - if err != nil { - return err - } + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: os, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } - // TODO(containerd): Use content store + image store - id, err := i.imageStore.Create(imgConfig) - if err != nil { - return err - } + // TODO(containerd): Use content store + image store + id, err := i.imageStore.Create(imgConfig) + if err != nil { + return err + } - // FIXME: connect with commit code and call refstore directly - //if newRef != nil { - // if err := i.TagImageWithReference(dgst, newRef); err != nil { - // return err - // } - //} + // FIXME: 
connect with commit code and call refstore directly + if newRef != nil { + if err := i.TagImageWithReference(dgst, newRef); err != nil { + return err + } + } - i.LogImageEvent(ctx, id.String(), id.String(), "import") - outStream.Write(streamformatter.FormatStatus("", id.String())) - return nil + i.LogImageEvent(ctx, id.String(), id.String(), "import") + outStream.Write(streamformatter.FormatStatus("", id.String())) + return nil + */ } diff --git a/daemon/images/image_prune.go b/daemon/images/image_prune.go index f1accfded1676..49a4453a9b0e3 100644 --- a/daemon/images/image_prune.go +++ b/daemon/images/image_prune.go @@ -2,20 +2,11 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" - "fmt" - "sync/atomic" - "time" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var imagesAcceptedFilters = map[string]bool{ @@ -31,138 +22,142 @@ var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already // ImagesPrune removes unused images func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { - if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) { - return nil, errPruneRunning - } - defer atomic.StoreInt32(&i.pruneRunning, 0) + return nil, errdefs.NotImplemented(errors.New("prune not implemented")) + /* + if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&i.pruneRunning, 0) - // make sure that only accepted filters have been received - err := pruneFilters.Validate(imagesAcceptedFilters) - if err != nil { - return nil, err - } + // make sure that only accepted filters 
have been received + err := pruneFilters.Validate(imagesAcceptedFilters) + if err != nil { + return nil, err + } - rep := &types.ImagesPruneReport{} + rep := &types.ImagesPruneReport{} - danglingOnly := true - if pruneFilters.Contains("dangling") { - if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { - danglingOnly = false - } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { - return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")} - } - } - - until, err := getUntilFromPruneFilters(pruneFilters) - if err != nil { - return nil, err - } + danglingOnly := true + if pruneFilters.Contains("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")} + } + } - var allImages map[image.ID]*image.Image - if danglingOnly { - allImages = i.imageStore.Heads() - } else { - allImages = i.imageStore.Map() - } + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err + } - // Filter intermediary images and get their unique size - allLayers := make(map[layer.ChainID]layer.Layer) - for _, ls := range i.layerStores { - for k, v := range ls.Map() { - allLayers[k] = v - } - } - topImages := map[image.ID]*image.Image{} - for id, img := range allImages { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - dgst := digest.Digest(id) - if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { - continue + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = i.imageStore.Heads() + } else { + allImages = i.imageStore.Map() } - if !until.IsZero() && img.Created.After(until) { - continue + + // Filter intermediary images and get their unique size + 
allLayers := make(map[layer.ChainID]layer.Layer) + for _, ls := range i.layerStores { + for k, v := range ls.Map() { + allLayers[k] = v + } } - if img.V1Image.Config != nil && !matchLabels(pruneFilters, img.V1Image.Config.Labels) { - continue + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + dgst := digest.Digest(id) + if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { + continue + } + if !until.IsZero() && img.Created.After(until) { + continue + } + if img.V1Image.Config != nil && !matchLabels(pruneFilters, img.V1Image.Config.Labels) { + continue + } + topImages[id] = img + } } - topImages[id] = img - } - } - canceled := false -deleteImagesLoop: - for id := range topImages { - select { - case <-ctx.Done(): - // we still want to calculate freed size and return the data - canceled = true - break deleteImagesLoop - default: - } - - deletedImages := []types.ImageDeleteResponseItem{} - refs := i.referenceStore.References(id.Digest()) - if len(refs) > 0 { - shouldDelete := !danglingOnly - if !shouldDelete { - hasTag := false - for _, ref := range refs { - if _, ok := ref.(reference.NamedTagged); ok { - hasTag = true - break - } + canceled := false + deleteImagesLoop: + for id := range topImages { + select { + case <-ctx.Done(): + // we still want to calculate freed size and return the data + canceled = true + break deleteImagesLoop + default: } - // Only delete if it's untagged (i.e. repo:) - shouldDelete = !hasTag - } + deletedImages := []types.ImageDeleteResponseItem{} + refs := i.referenceStore.References(id.Digest()) + if len(refs) > 0 { + shouldDelete := !danglingOnly + if !shouldDelete { + hasTag := false + for _, ref := range refs { + if _, ok := ref.(reference.NamedTagged); ok { + hasTag = true + break + } + } + + // Only delete if it's untagged (i.e. 
repo:) + shouldDelete = !hasTag + } - if shouldDelete { - for _, ref := range refs { - imgDel, err := i.ImageDelete(ctx, ref.String(), false, true) - if imageDeleteFailed(ref.String(), err) { + if shouldDelete { + for _, ref := range refs { + imgDel, err := i.ImageDelete(ctx, ref.String(), false, true) + if imageDeleteFailed(ref.String(), err) { + continue + } + deletedImages = append(deletedImages, imgDel...) + } + } + } else { + hex := id.Digest().Hex() + imgDel, err := i.ImageDelete(ctx, hex, false, true) + if imageDeleteFailed(hex, err) { continue } deletedImages = append(deletedImages, imgDel...) } - } - } else { - hex := id.Digest().Hex() - imgDel, err := i.ImageDelete(ctx, hex, false, true) - if imageDeleteFailed(hex, err) { - continue - } - deletedImages = append(deletedImages, imgDel...) - } - rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) - } + rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) + } - // Compute how much space was freed - for _, d := range rep.ImagesDeleted { - if d.Deleted != "" { - chid := layer.ChainID(d.Deleted) - if l, ok := allLayers[chid]; ok { - diffSize, err := l.DiffSize() - if err != nil { - logrus.Warnf("failed to get layer %s size: %v", chid, err) - continue + // Compute how much space was freed + for _, d := range rep.ImagesDeleted { + if d.Deleted != "" { + chid := layer.ChainID(d.Deleted) + if l, ok := allLayers[chid]; ok { + diffSize, err := l.DiffSize() + if err != nil { + logrus.Warnf("failed to get layer %s size: %v", chid, err) + continue + } + rep.SpaceReclaimed += uint64(diffSize) + } } - rep.SpaceReclaimed += uint64(diffSize) } - } - } - if canceled { - logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep) - } + if canceled { + logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep) + } - return rep, nil + return rep, nil + */ } +/* func imageDeleteFailed(ref string, err error) bool { switch { case err == nil: @@ -209,3 +204,4 @@ func getUntilFromPruneFilters(pruneFilters 
filters.Args) (time.Time, error) { until = time.Unix(seconds, nanoseconds) return until, nil } +*/ diff --git a/daemon/images/images.go b/daemon/images/images.go index c9c215b2d64aa..c4a90acf85acc 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -2,7 +2,6 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" - "encoding/json" "fmt" "sort" "strings" @@ -17,9 +16,9 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" ) var acceptedImageFilterTags = map[string]bool{ @@ -361,86 +360,89 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al // The existing image(s) is not destroyed. // If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. 
func (i *ImageService) SquashImage(id, parent string) (string, error) { + return "", errdefs.NotImplemented(errors.New("squash not implemented")) + + /* + var ( + img *image.Image + err error + ) + if img, err = i.imageStore.Get(image.ID(id)); err != nil { + return "", err + } - var ( - img *image.Image - err error - ) - if img, err = i.imageStore.Get(image.ID(id)); err != nil { - return "", err - } - - var parentImg *image.Image - var parentChainID layer.ChainID - if len(parent) != 0 { - parentImg, err = i.imageStore.Get(image.ID(parent)) + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = i.imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + if !system.IsOSSupported(img.OperatingSystem()) { + return "", errors.Wrap(err, system.ErrNotSupportedOperatingSystem.Error()) + } + l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) if err != nil { - return "", errors.Wrap(err, "error getting specified parent layer") + return "", errors.Wrap(err, "error getting image layer") } - parentChainID = parentImg.RootFS.ChainID() - } else { - rootFS := image.NewRootFS() - parentImg = &image.Image{RootFS: rootFS} - } - if !system.IsOSSupported(img.OperatingSystem()) { - return "", errors.Wrap(err, system.ErrNotSupportedOperatingSystem.Error()) - } - l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) - if err != nil { - return "", errors.Wrap(err, "error getting image layer") - } - defer i.layerStores[img.OperatingSystem()].Release(l) + defer i.layerStores[img.OperatingSystem()].Release(l) - ts, err := l.TarStreamFrom(parentChainID) - if err != nil { - return "", errors.Wrapf(err, "error getting tar stream to parent") - } - defer ts.Close() + ts, err := l.TarStreamFrom(parentChainID) 
+ if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() - newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID) - if err != nil { - return "", errors.Wrap(err, "error registering layer") - } - defer i.layerStores[img.OperatingSystem()].Release(newL) + newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer i.layerStores[img.OperatingSystem()].Release(newL) - newImage := *img - newImage.RootFS = nil + newImage := *img + newImage.RootFS = nil - rootFS := *parentImg.RootFS - rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) - newImage.RootFS = &rootFS + rootFS := *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS - for i, hi := range newImage.History { - if i >= len(parentImg.History) { - hi.EmptyLayer = true + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi } - newImage.History[i] = hi - } - now := time.Now() - var historyComment string - if len(parent) > 0 { - historyComment = fmt.Sprintf("merge %s to %s", id, parent) - } else { - historyComment = fmt.Sprintf("create new from %s", id) - } + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } - newImage.History = append(newImage.History, image.History{ - Created: now, - Comment: historyComment, - }) - newImage.Created = now + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now - b, err := json.Marshal(&newImage) - if err != nil { - return "", errors.Wrap(err, "error marshalling image config") - } + b, err := json.Marshal(&newImage) + if err != nil { + return "", 
errors.Wrap(err, "error marshalling image config") + } - newImgID, err := i.imageStore.Create(b) - if err != nil { - return "", errors.Wrap(err, "error creating new image after squash") - } - return string(newImgID), nil + newImgID, err := i.imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") + } + return string(newImgID), nil + */ } func newImage(image *image.Image, size int64) *types.ImageSummary { diff --git a/daemon/images/service.go b/daemon/images/service.go index 6ed2f5605acea..eba74c9f251ce 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -9,14 +9,9 @@ import ( "github.com/containerd/containerd/platforms" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" - "github.com/docker/docker/distribution" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/system" - dockerreference "github.com/docker/docker/reference" "github.com/docker/docker/registry" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -43,19 +38,17 @@ type ImageServiceConfig struct { DefaultNamespace string DefaultPlatform ocispec.Platform Client *containerd.Client - ContainerStore containerStore EventsService *daemonevents.Events LayerBackends []LayerBackend MaxConcurrentDownloads int MaxConcurrentUploads int - // deprecated - DistributionMetadataStore metadata.Store - // deprecated - ImageStore image.Store - // deprecated - ReferenceStore dockerreference.Store - // deprecated + // TODO(containerd): use containerd client after containers updated + // to store all metadata in containerd + ContainerStore containerStore + + // TODO(containerd): deprecated, move functions which require this + // out of image service RegistryService registry.Service } @@ -70,7 +63,7 @@ 
func NewImageService(config ImageServiceConfig) *ImageService { pc.matchers = append(pc.matchers, backend.Platform) layerStores[backend.DriverName()] = backend.Store } - // TODO(containerd): Store backends by name and ordered + return &ImageService{ namespace: config.DefaultNamespace, defaultPlatform: config.DefaultPlatform, @@ -82,12 +75,7 @@ func NewImageService(config ImageServiceConfig) *ImageService { layerBackends: config.LayerBackends, layerStores: layerStores, - distributionMetadataStore: config.DistributionMetadataStore, - imageStore: config.ImageStore, - referenceStore: config.ReferenceStore, - registryService: config.RegistryService, - //downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), - //uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), + registryService: config.RegistryService, } } @@ -136,33 +124,7 @@ type ImageService struct { cacheL sync.RWMutex // To be replaced by containerd client - registryService registry.Service - referenceStore dockerreference.Store - imageStore image.Store - distributionMetadataStore metadata.Store - downloadManager *xfer.LayerDownloadManager - uploadManager *xfer.LayerUploadManager -} - -// DistributionServices provides daemon image storage services -type DistributionServices struct { - DownloadManager distribution.RootFSDownloadManager - V2MetadataService metadata.V2MetadataService - LayerStore layer.Store // TODO: lcow - ImageStore image.Store - ReferenceStore dockerreference.Store -} - -// DistributionServices return services controlling daemon image storage -// deprecated: use containerd client -func (i *ImageService) DistributionServices() DistributionServices { - return DistributionServices{ - DownloadManager: i.downloadManager, - V2MetadataService: metadata.NewV2MetadataService(i.distributionMetadataStore), - LayerStore: i.layerBackends[0].Store, - ImageStore: i.imageStore, - ReferenceStore: i.referenceStore, - } + registryService 
registry.Service } // CountImages returns the number of images stored by ImageService @@ -196,6 +158,12 @@ func (i *ImageService) GetImageBackend(image RuntimeImage) (layer.Store, error) return nil, errdefs.System(errors.Wrapf(system.ErrNotSupportedOperatingSystem, "no layer storage backend configured for %s", image.Platform.OS)) } +// GetLayerStore returns the best layer store for the given platform +// Deprecated: Do not access layer stores directly, snapshotters may be used +func (i *ImageService) GetLayerStore(platform ocispec.Platform) (layer.Store, error) { + return i.getLayerStore(platform) +} + func (i *ImageService) getLayerStore(platform ocispec.Platform) (layer.Store, error) { for _, backend := range i.layerBackends { if backend.Platform.Match(platform) { @@ -314,11 +282,5 @@ func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { // // called from reload.go func (i *ImageService) UpdateConfig(maxDownloads, maxUploads *int) { - // TODO(containerd): store these locally to configure resolver - if i.downloadManager != nil && maxDownloads != nil { - i.downloadManager.SetConcurrency(*maxDownloads) - } - if i.uploadManager != nil && maxUploads != nil { - i.uploadManager.SetConcurrency(*maxUploads) - } + // TODO(containerd): store these locally to configure download/upload } diff --git a/daemon/migration.go b/daemon/migration.go new file mode 100644 index 0000000000000..e72f4ebc947da --- /dev/null +++ b/daemon/migration.go @@ -0,0 +1,149 @@ +package daemon + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "runtime" + "time" + + containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + "github.com/docker/docker/daemon/images" + "github.com/docker/docker/distribution" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + dockerreference 
"github.com/docker/docker/reference" + refstore "github.com/docker/docker/reference" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func (d *Daemon) Migrate(ctx context.Context, root string) error { + if d.containerdCli == nil { + return errors.New("unable to migrate without containerd") + } + + // TODO(containerd): Migrate ALL configured graph drivers, in reverse order + // to keep latest configured driver with the image store + imageRoot := filepath.Join(root, "image", d.graphDrivers[runtime.GOOS]) + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return err + } + + // We have a single tag/reference store for the daemon globally. However, it's + // stored under the graphdriver. On host platforms which only support a single + // container OS, but multiple selectable graphdrivers, this means depending on which + // graphdriver is chosen, the global reference store is under there. For + // platforms which support multiple container operating systems, this is slightly + // more problematic as where does the global ref store get located? Fortunately, + // for Windows, which is currently the only daemon supporting multiple container + // operating systems, the list of graphdrivers available isn't user configurable. + // For backwards compatibility, we just put it under the windowsfilter + // directory regardless. 
+ refStoreLocation := filepath.Join(imageRoot, `repositories.json`) + rs, err := refstore.NewReferenceStore(refStoreLocation) + if err != nil { + return fmt.Errorf("Couldn't create reference store repository: %s", err) + } + + ctx, done, err := d.containerdCli.WithLease(ctx) + if err != nil { + return err + } + + defer func() { + if err := done(context.Background()); err != nil { + logrus.WithError(err).Error("failed to remove lease") + } + }() + + if err := image.MigrateImageStore(ctx, ifs, d.containerdCli.ContentStore(), images.LabelImageParent); err != nil { + return err + } + + print("Migrating references ") + numRef := 0 + rs.Walk(func(ref reference.Named) error { + id, err := rs.Get(ref) + if err != nil { + logrus.WithError(err).Warnf("can't get digest for %s", id) + return nil + } + config, err := ifs.Get(id) + if err != nil { + logrus.WithError(err).Warnf("can't get config for %s", id) + return nil + } + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: id, + Size: int64(len(config)), + } + + // find out created time + var img image.Image + if err := json.Unmarshal(config, &img); err != nil { + logrus.WithError(err).Warn("can't parse image") + return nil + } + created := img.Created + // find out updated time + updated := created + if updatedStr, err := ifs.GetMetadata(id, "lastUpdated"); err == nil { + updated, err = time.Parse(time.RFC3339Nano, string(updatedStr)) + if err != nil { + logrus.WithError(err).Warn("can't parse lastUpdated time %q for %s", string(updatedStr), id) + updated = created + } + } + _, err = d.containerdCli.ImageService().Create(ctx, containerdimages.Image{ + Name: ref.String(), + Target: desc, + CreatedAt: created, + UpdatedAt: updated, + Labels: map[string]string{}, // TODO any labels here? 
+ }) + if err != nil { + logrus.WithError(err).Warn("can't create image") + return nil + } + print(".") + // TODO + // rs.Delete(ref) + + numRef++ + return nil + }) + println(" done,", numRef, "references") + return nil +} + +// DEPRECATED AFTER MIGRATION + +// DistributionServices provides daemon image storage services +type DistributionServices struct { + DownloadManager distribution.RootFSDownloadManager + V2MetadataService metadata.V2MetadataService + LayerStore layer.Store + ImageStore image.Store + ReferenceStore dockerreference.Store +} + +// DistributionServices returns services controlling daemon storage +// TODO(containerd): Remove this +func (daemon *Daemon) DistributionServices() (DistributionServices, error) { + ls, err := daemon.imageService.GetLayerStore(platforms.DefaultSpec()) + if err != nil { + return DistributionServices{}, err + } + return DistributionServices{ + LayerStore: ls, + }, nil +} From 214c1f3a5b532f8141fd7a7d27f1b542658013a4 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 8 Apr 2019 16:15:19 -0700 Subject: [PATCH 64/73] Fix validation Signed-off-by: Derek McGowan --- api/types/container/container_wait.go | 16 ++--- builder/dockerfile/imagecontext.go | 3 +- daemon/images/image_prune.go | 14 ++-- daemon/migration.go | 5 +- image/fs.go | 1 + reference/store.go | 1 + .../containerd/services/content/service.go | 50 +++++++++++++ .../containerd/services/content/store.go | 71 +++++++++++++++++++ .../containerd/services/services.go | 36 ++++++++++ 9 files changed, 179 insertions(+), 18 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/services/content/service.go create mode 100644 vendor/github.com/containerd/containerd/services/content/store.go create mode 100644 vendor/github.com/containerd/containerd/services/services.go diff --git a/api/types/container/container_wait.go b/api/types/container/container_wait.go index 7eec9d83e47cc..94b6a20e159b6 100644 --- a/api/types/container/container_wait.go +++ 
b/api/types/container/container_wait.go @@ -7,6 +7,14 @@ package container // import "github.com/docker/docker/api/types/container" // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} + // ContainerWaitOKBody OK response to ContainerWait operation // swagger:model ContainerWaitOKBody type ContainerWaitOKBody struct { @@ -19,11 +27,3 @@ type ContainerWaitOKBody struct { // Required: true StatusCode int64 `json:"StatusCode"` } - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go index 5fb68cb231605..165b677b1413b 100644 --- a/builder/dockerfile/imagecontext.go +++ b/builder/dockerfile/imagecontext.go @@ -130,7 +130,8 @@ type containerdImage struct { config *container.Config } -func NewContainerdImage(desc ocispec.Descriptor, client *containerd.Client, config *container.Config) *containerdImage { +// NewContainerdImage returns a containerd image given a container config +func NewContainerdImage(desc ocispec.Descriptor, client *containerd.Client, config *container.Config) builder.Image { return &containerdImage{desc: desc, containerdCli: client, config: config} } diff --git a/daemon/images/image_prune.go b/daemon/images/image_prune.go index 49a4453a9b0e3..fb5ddce881ead 100644 --- a/daemon/images/image_prune.go +++ b/daemon/images/image_prune.go @@ -9,16 +9,16 @@ import ( "github.com/pkg/errors" ) -var imagesAcceptedFilters = map[string]bool{ - "dangling": true, - "label": true, - "label!": true, - "until": true, -} +//var imagesAcceptedFilters = 
map[string]bool{ +// "dangling": true, +// "label": true, +// "label!": true, +// "until": true, +//} // errPruneRunning is returned when a prune request is received while // one is in progress -var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) +//var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) // ImagesPrune removes unused images func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { diff --git a/daemon/migration.go b/daemon/migration.go index e72f4ebc947da..9915998e6ed0c 100644 --- a/daemon/migration.go +++ b/daemon/migration.go @@ -23,6 +23,7 @@ import ( "github.com/sirupsen/logrus" ) +// Migrate migrates the given root directory to containerd func (d *Daemon) Migrate(ctx context.Context, root string) error { if d.containerdCli == nil { return errors.New("unable to migrate without containerd") @@ -138,8 +139,8 @@ type DistributionServices struct { // DistributionServices returns services controlling daemon storage // TODO(containerd): Remove this -func (daemon *Daemon) DistributionServices() (DistributionServices, error) { - ls, err := daemon.imageService.GetLayerStore(platforms.DefaultSpec()) +func (d *Daemon) DistributionServices() (DistributionServices, error) { + ls, err := d.imageService.GetLayerStore(platforms.DefaultSpec()) if err != nil { return DistributionServices{}, err } diff --git a/image/fs.go b/image/fs.go index e54d3f6e25b5d..a23fbd0d8be46 100644 --- a/image/fs.go +++ b/image/fs.go @@ -178,6 +178,7 @@ func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) } +// MigrateImageStore migrates the given image store to containerd content store func MigrateImageStore(ctx context.Context, from StoreBackend, to content.Store, labelParent string) error { print(`Migrating image store `) num := 0 diff --git a/reference/store.go 
b/reference/store.go index caf72d3295ee5..eda78c68db761 100644 --- a/reference/store.go +++ b/reference/store.go @@ -39,6 +39,7 @@ type Store interface { // RefWalkFunc is a callback function type used by WalkableStore.Walk type RefWalkFunc func(a reference.Named) error +// WalkableStore represents a walkable reference store type WalkableStore interface { Store Walk(f RefWalkFunc) error diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go new file mode 100644 index 0000000000000..43320d54d0527 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/service.go @@ -0,0 +1,50 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package content + +import ( + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + "github.com/containerd/containerd/services/content/contentserver" + "github.com/pkg/errors" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "content", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.ContentService] + if !ok { + return nil, errors.New("content store service not found") + } + cs, err := p.Instance() + if err != nil { + return nil, err + } + return contentserver.New(cs.(content.Store)), nil + }, + }) +} diff --git a/vendor/github.com/containerd/containerd/services/content/store.go b/vendor/github.com/containerd/containerd/services/content/store.go new file mode 100644 index 0000000000000..3de91d37c8598 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/store.go @@ -0,0 +1,71 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package content + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + digest "github.com/opencontainers/go-digest" +) + +// store wraps content.Store with proper event published. +type store struct { + content.Store + publisher events.Publisher +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ContentService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + s, err := newContentStore(m.(*metadata.DB).ContentStore(), ic.Events) + return s, err + }, + }) +} + +func newContentStore(cs content.Store, publisher events.Publisher) (content.Store, error) { + return &store{ + Store: cs, + publisher: publisher, + }, nil +} + +func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + if err := s.Store.Delete(ctx, dgst); err != nil { + return err + } + // TODO: Consider whether we should return error here. + return s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{ + Digest: dgst, + }) +} diff --git a/vendor/github.com/containerd/containerd/services/services.go b/vendor/github.com/containerd/containerd/services/services.go new file mode 100644 index 0000000000000..efc920093e99d --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/services.go @@ -0,0 +1,36 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package services + +const ( + // ContentService is id of content service. + ContentService = "content-service" + // SnapshotsService is id of snapshots service. + SnapshotsService = "snapshots-service" + // ImagesService is id of images service. + ImagesService = "images-service" + // ContainersService is id of containers service. + ContainersService = "containers-service" + // TasksService is id of tasks service. + TasksService = "tasks-service" + // NamespacesService is id of namespaces service. + NamespacesService = "namespaces-service" + // LeasesService is id of leases service. + LeasesService = "leases-service" + // DiffService is id of diff service. + DiffService = "diff-service" +) From 62b56f12f1f00344b7afee5a6d65f1bf5617b886 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 10 Apr 2019 15:19:29 -0700 Subject: [PATCH 65/73] Update migration to use layer stores Move logic to image service and merge with cache loading. 
Signed-off-by: Derek McGowan --- daemon/daemon.go | 27 ++- daemon/images/cache.go | 49 +++--- daemon/images/migration.go | 326 +++++++++++++++++++++++++++++++++++++ daemon/migration.go | 121 +------------- 4 files changed, 366 insertions(+), 157 deletions(-) create mode 100644 daemon/images/migration.go diff --git a/daemon/daemon.go b/daemon/daemon.go index a236544f331df..e4b50a24031e5 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -980,15 +980,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - // TODO(containerd): Check migration from on disk state - // TODO(containerd): Perform migration after image service creation - if os.Getenv("DOCKER_MIGRATE_IMAGE_STORE") != "" { - // TODO(containerd): Pass in preferred driver name for OS - if err := d.Migrate(ctx, config.Root); err != nil { - return nil, err - } - } - // Discovery is only enabled when the daemon is launched with an address to advertise. When // initialized, the daemon is registered and we can store the discovery backend as it's read-only if err := d.initDiscovery(config); err != nil { @@ -1020,24 +1011,26 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.linkIndex = newLinkIndex() - // TODO: imageStore, distributionMetadataStore, and ReferenceStore are only - // used above to run migration. They could be initialized in ImageService - // if migration is called from daemon/images. layerStore might move as well. 
d.imageService = images.NewImageService(images.ImageServiceConfig{ DefaultNamespace: ContainersNamespace, DefaultPlatform: storageDrivers[0].platform, Client: d.containerdCli, - ContainerStore: d.containers, EventsService: d.EventsService, LayerBackends: backends, MaxConcurrentDownloads: *config.MaxConcurrentDownloads, MaxConcurrentUploads: *config.MaxConcurrentUploads, - RegistryService: registryService, + + // TODO(containerd): Remove registry service from image service + RegistryService: registryService, + + // TODO(containerd): Use containerd's container store after the + // containerd metadata is completely moved to containerd + ContainerStore: d.containers, }) - // TODO(containerd): create earlier, background, and wait at end - if err := d.imageService.LoadCache(namespaces.WithNamespace(ctx, ContainersNamespace)); err != nil { - return nil, errors.Wrap(err, "failed to load image cache from containerd") + nctx := namespaces.WithNamespace(ctx, ContainersNamespace) + if err := d.imageService.Load(nctx, config.Root); err != nil { + return nil, errors.Wrap(err, "failed to load image service") } go d.execCommandGC() diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 5c23cd1af319d..9bd1d306cb4d6 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -38,36 +38,19 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach defer i.cacheL.Unlock() var ( - cs = i.client.ContentStore() - c = &cache{ + c = &cache{ layers: map[string]map[digest.Digest]layer.Layer{}, } ) // Load layers for _, backend := range i.layerBackends { - backendCache := map[digest.Digest]layer.Layer{} - name := backend.DriverName() - label := fmt.Sprintf("%s%s", LabelLayerPrefix, name) - err := cs.Walk(ctx, func(info content.Info) error { - value := digest.Digest(info.Labels[label]) - if _, ok := backendCache[value]; ok { - return nil - } - l, err := backend.Get(layer.ChainID(value)) - if err != nil { - 
log.G(ctx).WithError(err).WithField("digest", info.Digest).WithField("driver", name).Warnf("unable to get layer") - } else { - log.G(ctx).WithField("digest", info.Digest).WithField("driver", name).Debugf("retaining layer %s", value) - backendCache[value] = l - } - return nil - }, fmt.Sprintf("labels.%q", label)) + backendCache, err := i.loadLayers(ctx, backend) if err != nil { return nil, err } - c.layers[name] = backendCache + c.layers[backend.DriverName()] = backendCache } i.cache[namespace] = c @@ -75,6 +58,32 @@ func (i *ImageService) loadNSCache(ctx context.Context, namespace string) (*cach return c, nil } +func (i *ImageService) loadLayers(ctx context.Context, backend layer.Store) (map[digest.Digest]layer.Layer, error) { + cs := i.client.ContentStore() + backendCache := map[digest.Digest]layer.Layer{} + name := backend.DriverName() + label := fmt.Sprintf("%s%s", LabelLayerPrefix, name) + err := cs.Walk(ctx, func(info content.Info) error { + value := digest.Digest(info.Labels[label]) + if _, ok := backendCache[value]; ok { + return nil + } + l, err := backend.Get(layer.ChainID(value)) + if err != nil { + log.G(ctx).WithError(err).WithField("digest", info.Digest).WithField("driver", name).Warnf("unable to get layer") + } else { + log.G(ctx).WithField("digest", info.Digest).WithField("driver", name).Debugf("retaining layer %s", value) + backendCache[value] = l + } + return nil + }, fmt.Sprintf("labels.%q", label)) + if err != nil { + return nil, err + } + + return backendCache, nil +} + func (i *ImageService) getCache(ctx context.Context) (c *cache, err error) { namespace, ok := namespaces.Namespace(ctx) if !ok { diff --git a/daemon/images/migration.go b/daemon/images/migration.go new file mode 100644 index 0000000000000..979a3865f60c8 --- /dev/null +++ b/daemon/images/migration.go @@ -0,0 +1,326 @@ +package images + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containerd/containerd/content" + 
"github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/docker/distribution/reference" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Load migrates and loads caches for the image service. +func (i *ImageService) Load(ctx context.Context, root string) (err error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + log.G(ctx).WithField("namespace", namespace).Debugf("migration and loading cache") + + var ( + c = &cache{ + layers: map[string]map[digest.Digest]layer.Layer{}, + } + t1 = time.Now() + entry = log.G(ctx).WithField("namespace", namespace) + done func(context.Context) error + version = []byte("1") + ) + + i.cacheL.Lock() + defer func() { + if err == nil { + i.cache[namespace] = c + entry.WithField("t", time.Since(t1)).Infof("finished load and migration") + } + i.cacheL.Unlock() + }() + + for _, backend := range i.layerBackends { + p := filepath.Join(root, "image", backend.DriverName()) + if _, err := os.Stat(p); err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + + var backendCache map[digest.Digest]layer.Layer + if _, err := os.Stat(filepath.Join(p, "migration")); err == nil { + // Version of migration not currently relevant, all non-empty can be skipped + + backendCache, err = i.loadLayers(ctx, backend) + if err != nil { + return err + } + + // TODO(containerd): Add distribution metadata store if migration level is less than 2 + } else if !os.IsNotExist(err) { + return err + } else { + if done == nil { + ctx, done, err = i.client.WithLease(ctx) + if err != nil { + return err + } + + defer func() { + if err := done(context.Background()); err != 
nil { + entry.WithError(err).Error("failed to remove lease") + } + }() + } + + entry.WithField("root", p).Debugf("migrating images") + + var updates map[digest.Digest]*time.Time + backendCache, updates, err = i.migrateImages(ctx, filepath.Join(p, "imagedb"), backend) + if err != nil { + return errors.Wrap(err, "failed to migrate images") + } + + if err := i.migrateRepositories(ctx, p, updates); err != nil { + return errors.Wrap(err, "failed to migrate repositories") + } + + if err := ioutils.AtomicWriteFile(filepath.Join(p, "migration"), version, 0600); err != nil { + return errors.Wrap(err, "failed to write migration file") + } + + } + + c.layers[backend.DriverName()] = backendCache + } + + return nil +} + +func (i *ImageService) migrateImages(ctx context.Context, root string, ls layer.Store) (map[digest.Digest]layer.Layer, map[digest.Digest]*time.Time, error) { + backendCache := map[digest.Digest]layer.Layer{} + backendName := ls.DriverName() + updates := map[digest.Digest]*time.Time{} + cs := i.client.ContentStore() + + // Only Canonical digest (sha256) is currently supported + dir := filepath.Join(root, "content", string(digest.Canonical)) + subs, err := ioutil.ReadDir(dir) + if err != nil { + return nil, nil, err + } + mpath := filepath.Join(root, "metadata", string(digest.Canonical)) + for _, v := range subs { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + log.G(ctx).WithError(err).Debugf("skipping invalid digest %q", dgst) + continue + } + + contents, err := ioutil.ReadFile(filepath.Join(dir, v.Name())) + if err != nil { + log.G(ctx).WithError(err).WithField("id", dgst).Errorf("failed to read content") + continue + } + + var config ocispec.Image + if err := json.Unmarshal(contents, &config); err != nil { + log.G(ctx).WithError(err).WithField("id", dgst).Errorf("unable to parse config") + continue + } + + chainID := identity.ChainID(config.RootFS.DiffIDs) + if _, ok := backendCache[chainID]; 
!ok { + l, err := ls.Get(layer.ChainID(chainID)) + if err != nil { + if err == layer.ErrLayerDoesNotExist { + log.G(ctx).WithField("id", dgst).WithField("chainid", chainID).Warnf("missing referenced layer") + } else { + return nil, nil, errors.Wrap(err, "failed to get layer") + } + } else { + backendCache[chainID] = l + } + } + + var parent digest.Digest + if b, err := ioutil.ReadFile(filepath.Join(mpath, v.Name(), "parent")); err == nil && len(b) > 0 { + parent := digest.Digest(b) + if err := parent.Validate(); err != nil { + log.G(ctx).WithError(err).Debugf("invalid parent %q", parent) + parent = "" + } + } else if err != nil && !os.IsNotExist(err) { + log.G(ctx).WithError(err).WithField("id", dgst).Errorf("failed to read parent") + } + + var lastUpdated *time.Time + if b, err := ioutil.ReadFile(filepath.Join(mpath, v.Name(), "lastUpdated")); err == nil && len(b) > 0 { + t, err := time.Parse(time.RFC3339Nano, string(b)) + if err != nil { + log.G(ctx).WithError(err).Debugf("invalid lastUpdated %q", string(b)) + } else { + lastUpdated = &t + } + } else if err != nil && !os.IsNotExist(err) { + log.G(ctx).WithError(err).WithField("id", dgst).Errorf("failed to read last updated") + } + updates[dgst] = lastUpdated + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: dgst, + Size: int64(len(contents)), + } + + labels := map[string]string{ + LabelLayerPrefix + backendName: chainID.String(), + } + + if parent != "" { + labels[LabelImageParent] = parent.String() + } + + ref := "config-" + dgst.Algorithm().String() + "-" + dgst.Encoded() + if err := content.WriteBlob(ctx, cs, ref, bytes.NewReader(contents), desc, content.WithLabels(labels)); err != nil { + log.G(ctx).WithError(err).WithField("id", dgst).Errorf("can't store config") + } + + } + return backendCache, updates, nil +} + +func (i *ImageService) migrateRepositories(ctx context.Context, root string, all map[digest.Digest]*time.Time) error { + b, err := 
ioutil.ReadFile(filepath.Join(root, `repositories.json`)) + if err != nil { + return errors.Wrap(err, "failed to read repositories file") + } + var repos struct { + Repositories map[string]map[string]digest.Digest + } + if err := json.Unmarshal(b, &repos); err != nil { + return errors.Wrap(err, "invalid repositories file") + } + is := i.client.ImageService() + cs := i.client.ContentStore() + remaining := map[digest.Digest]struct{}{} + for dgst, lastUpdated := range all { + if lastUpdated != nil { + // TODO(containerd): this value was only used for image inspect + // Metadata.LastTagTime, this has been replaced by the actual + // last tag time in containerd, but that updated time cannot + // currently be backdated through the containerd API. + log.G(ctx).WithField("id", dgst).WithField("lastUpdated", *lastUpdated).Debugf("dropping 'lastUpdated' value") + } + + remaining[dgst] = struct{}{} + } + for _, repoGroup := range repos.Repositories { + imgs := map[digest.Digest][]reference.Named{} + for name, dgst := range repoGroup { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + log.G(ctx).WithError(err).WithField("name", name).Warnf("skipping bad name") + } + imgs[dgst] = append(imgs[dgst], named) + } + for dgst, refs := range imgs { + info, err := cs.Info(ctx, dgst) + if err != nil { + if !errdefs.IsNotFound(err) { + return errors.Wrap(err, "unable to stat content") + } + log.G(ctx).WithField("digest", dgst).Errorf("missing image, ignoring tags") + continue + } + var names []reference.Named + var tags []string + var untagged []reference.Named + for _, ref := range refs { + if tagged, ok := ref.(reference.NamedTagged); ok { + names = append(names, tagged) + tags = append(tags, tagged.Tag()) + } else { + untagged = append(untagged, ref) + } + } + + for _, untagged := range refs { + if len(tags) > 0 { + for _, tag := range tags { + nt, err := reference.WithTag(untagged, tag) + if err != nil { + log.G(ctx).WithError(err).WithField("tag", 
tag).Errorf("ignoring invalid tag") + continue + } + names = append(names, nt) + } + } else { + names = append(names, untagged) + } + } + + for _, named := range names { + _, err = is.Create(ctx, images.Image{ + Name: named.String(), + Target: ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Config, + Digest: dgst, + Size: info.Size, + }, + // TODO(containerd): Support setting created/updated time, + // ignored by containerd daemon as of 1.2 + }) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to create image") + } + log.G(ctx).WithField("name", named.String()).WithField("digest", dgst).Debugf("image already exists, skipping") + } + } + delete(remaining, dgst) + + } + } + + for dgst := range remaining { + info, err := cs.Info(ctx, dgst) + if err != nil { + if !errdefs.IsNotFound(err) { + return errors.Wrap(err, "unable to stat content") + } + log.G(ctx).WithField("digest", dgst).Errorf("missing image, ignoring tags") + continue + } + + _, err = is.Create(ctx, images.Image{ + Name: "@" + dgst.String(), + Target: ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Config, + Digest: dgst, + Size: info.Size, + }, + // TODO(containerd): Support setting created/updated time, + // ignored by containerd daemon as of 1.2 + }) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to create image") + } + } + } + return nil +} diff --git a/daemon/migration.go b/daemon/migration.go index 9915998e6ed0c..85f1ccdd069cf 100644 --- a/daemon/migration.go +++ b/daemon/migration.go @@ -1,133 +1,14 @@ package daemon import ( - "context" - "encoding/json" - "fmt" - "path/filepath" - "runtime" - "time" - - containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/docker/docker/daemon/images" "github.com/docker/docker/distribution" "github.com/docker/docker/distribution/metadata" 
"github.com/docker/docker/image" "github.com/docker/docker/layer" dockerreference "github.com/docker/docker/reference" - refstore "github.com/docker/docker/reference" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) -// Migrate migrates the given root directory to containerd -func (d *Daemon) Migrate(ctx context.Context, root string) error { - if d.containerdCli == nil { - return errors.New("unable to migrate without containerd") - } - - // TODO(containerd): Migrate ALL configured graph drivers, in reverse order - // to keep latest configured driver with the image store - imageRoot := filepath.Join(root, "image", d.graphDrivers[runtime.GOOS]) - ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) - if err != nil { - return err - } - - // We have a single tag/reference store for the daemon globally. However, it's - // stored under the graphdriver. On host platforms which only support a single - // container OS, but multiple selectable graphdrivers, this means depending on which - // graphdriver is chosen, the global reference store is under there. For - // platforms which support multiple container operating systems, this is slightly - // more problematic as where does the global ref store get located? Fortunately, - // for Windows, which is currently the only daemon supporting multiple container - // operating systems, the list of graphdrivers available isn't user configurable. - // For backwards compatibility, we just put it under the windowsfilter - // directory regardless. 
- refStoreLocation := filepath.Join(imageRoot, `repositories.json`) - rs, err := refstore.NewReferenceStore(refStoreLocation) - if err != nil { - return fmt.Errorf("Couldn't create reference store repository: %s", err) - } - - ctx, done, err := d.containerdCli.WithLease(ctx) - if err != nil { - return err - } - - defer func() { - if err := done(context.Background()); err != nil { - logrus.WithError(err).Error("failed to remove lease") - } - }() - - if err := image.MigrateImageStore(ctx, ifs, d.containerdCli.ContentStore(), images.LabelImageParent); err != nil { - return err - } - - print("Migrating references ") - numRef := 0 - rs.Walk(func(ref reference.Named) error { - id, err := rs.Get(ref) - if err != nil { - logrus.WithError(err).Warnf("can't get digest for %s", id) - return nil - } - config, err := ifs.Get(id) - if err != nil { - logrus.WithError(err).Warnf("can't get config for %s", id) - return nil - } - - desc := ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: id, - Size: int64(len(config)), - } - - // find out created time - var img image.Image - if err := json.Unmarshal(config, &img); err != nil { - logrus.WithError(err).Warn("can't parse image") - return nil - } - created := img.Created - // find out updated time - updated := created - if updatedStr, err := ifs.GetMetadata(id, "lastUpdated"); err == nil { - updated, err = time.Parse(time.RFC3339Nano, string(updatedStr)) - if err != nil { - logrus.WithError(err).Warn("can't parse lastUpdated time %q for %s", string(updatedStr), id) - updated = created - } - } - _, err = d.containerdCli.ImageService().Create(ctx, containerdimages.Image{ - Name: ref.String(), - Target: desc, - CreatedAt: created, - UpdatedAt: updated, - Labels: map[string]string{}, // TODO any labels here? 
- }) - if err != nil { - logrus.WithError(err).Warn("can't create image") - return nil - } - print(".") - // TODO - // rs.Delete(ref) - - numRef++ - return nil - }) - println(" done,", numRef, "references") - return nil -} - -// DEPRECATED AFTER MIGRATION - // DistributionServices provides daemon image storage services type DistributionServices struct { DownloadManager distribution.RootFSDownloadManager @@ -138,7 +19,7 @@ type DistributionServices struct { } // DistributionServices returns services controlling daemon storage -// TODO(containerd): Remove this +// TODO(containerd): deprecated after migration func (d *Daemon) DistributionServices() (DistributionServices, error) { ls, err := d.imageService.GetLayerStore(platforms.DefaultSpec()) if err != nil { From 7493e8d439e6313cc465385170a6aa99d533a099 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 11 Apr 2019 16:51:51 -0700 Subject: [PATCH 66/73] Ignore not exists errors on migration Signed-off-by: Derek McGowan --- daemon/images/migration.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/daemon/images/migration.go b/daemon/images/migration.go index 979a3865f60c8..c229a7a8bfef4 100644 --- a/daemon/images/migration.go +++ b/daemon/images/migration.go @@ -120,6 +120,9 @@ func (i *ImageService) migrateImages(ctx context.Context, root string, ls layer. dir := filepath.Join(root, "content", string(digest.Canonical)) subs, err := ioutil.ReadDir(dir) if err != nil { + if os.IsNotExist(err) { + return backendCache, updates, nil + } return nil, nil, err } mpath := filepath.Join(root, "metadata", string(digest.Canonical)) @@ -206,6 +209,10 @@ func (i *ImageService) migrateImages(ctx context.Context, root string, ls layer. 
func (i *ImageService) migrateRepositories(ctx context.Context, root string, all map[digest.Digest]*time.Time) error { b, err := ioutil.ReadFile(filepath.Join(root, `repositories.json`)) if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "failed to read repositories file") } var repos struct { From 6993befeaf6da816420b5a127cb408628c2d6eba Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 23 Apr 2019 15:40:36 -0700 Subject: [PATCH 67/73] Update dockerfile builder Replace use of image id with descriptor Signed-off-by: Derek McGowan --- api/server/backend/build/backend.go | 34 ++--- api/types/backend/backend.go | 3 +- api/types/backend/build.go | 6 +- api/types/container/config.go | 2 + builder/builder-next/builder.go | 3 +- builder/builder.go | 22 ++- builder/dockerfile/builder.go | 32 +++-- builder/dockerfile/containerbackend.go | 13 +- builder/dockerfile/dispatchers.go | 39 ++++-- builder/dockerfile/dispatchers_test.go | 67 +++++---- builder/dockerfile/evaluator.go | 55 ++++---- builder/dockerfile/imagecontext.go | 88 +++++------- builder/dockerfile/internals.go | 67 ++++++--- builder/dockerfile/mockbackend_test.go | 32 +++-- daemon/commit.go | 9 +- daemon/create.go | 23 +-- daemon/images/cache.go | 18 ++- daemon/images/image.go | 11 +- daemon/images/image_builder.go | 185 ++++++++++++++++--------- daemon/images/image_commit.go | 41 +++--- daemon/images/image_pull.go | 25 ++-- daemon/images/images.go | 4 +- 22 files changed, 454 insertions(+), 325 deletions(-) diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go index 02b3285bbf69b..471c9a5c12167 100644 --- a/api/server/backend/build/backend.go +++ b/api/server/backend/build/backend.go @@ -11,7 +11,6 @@ import ( buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/fscache" "github.com/docker/docker/pkg/stringid" - digest "github.com/opencontainers/go-digest" ocispec 
"github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -20,7 +19,7 @@ import ( // ImageComponent provides an interface for working with images type ImageComponent interface { - SquashImage(from string, to string) (string, error) + SquashImage(ctx context.Context, from ocispec.Descriptor, to *ocispec.Descriptor) (ocispec.Descriptor, error) TagImageWithReference(context.Context, ocispec.Descriptor, reference.Reference) error } @@ -76,13 +75,13 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string return "", nil } - var imageID = build.ImageID + var image = build.Image if options.Squash { - if imageID, err = squashBuild(build, b.imageComponent); err != nil { - return "", err + if image, err = b.imageComponent.SquashImage(ctx, build.Image, build.FromImage); err != nil { + return "", errors.Wrap(err, "error squashing image") } if config.ProgressWriter.AuxFormatter != nil { - if err = config.ProgressWriter.AuxFormatter.Emit("moby.image.id", types.BuildResult{ID: imageID}); err != nil { + if err = config.ProgressWriter.AuxFormatter.Emit("moby.image.id", types.BuildResult{ID: image.Digest.String()}); err != nil { return "", err } } @@ -90,15 +89,12 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string if !useBuildKit { stdout := config.ProgressWriter.StdoutFormatter - fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) + fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(image.Digest.String())) } - if imageID != "" { - err = tagger.TagImages(ctx, ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: digest.Digest(imageID), - }) + if image.Digest != "" { + err = tagger.TagImages(ctx, image) } - return imageID, err + return image.Digest.String(), err } // PruneCache removes all cached build sources @@ -137,15 +133,3 @@ func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOpti func (b 
*Backend) Cancel(ctx context.Context, id string) error { return b.buildkit.Cancel(ctx, id) } - -func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) { - var fromID string - if build.FromImage != nil { - fromID = build.FromImage.ImageID() - } - imageID, err := imageComponent.SquashImage(build.ImageID, fromID) - if err != nil { - return "", errors.Wrap(err, "error squashing image") - } - return imageID, nil -} diff --git a/api/types/backend/backend.go b/api/types/backend/backend.go index ef1e669c396f1..7d3fd736e4a3d 100644 --- a/api/types/backend/backend.go +++ b/api/types/backend/backend.go @@ -6,6 +6,7 @@ import ( "time" "github.com/docker/docker/api/types/container" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ContainerAttachConfig holds the streams to use when connecting to a container to view logs. @@ -124,5 +125,5 @@ type CommitConfig struct { ContainerID string ContainerMountLabel string ContainerOS string - ParentImageID string + ParentImage *ocispec.Descriptor } diff --git a/api/types/backend/build.go b/api/types/backend/build.go index ea99c422080f3..2fe7023f81b36 100644 --- a/api/types/backend/build.go +++ b/api/types/backend/build.go @@ -6,7 +6,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/streamformatter" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // PullOption defines different modes for accessing images @@ -42,12 +42,12 @@ type GetImageAndLayerOptions struct { PullOption PullOption AuthConfig map[string]types.AuthConfig Output io.Writer - Platform *specs.Platform + Platform *ocispec.Platform } // NewImageConfig are options for creating new images type NewImageConfig struct { - ParentImageID string + ParentImage *ocispec.Descriptor Author string OS string ContainerConfig *container.Config diff --git a/api/types/container/config.go 
b/api/types/container/config.go index f767195b94b41..a7094ee5a4fe9 100644 --- a/api/types/container/config.go +++ b/api/types/container/config.go @@ -5,6 +5,7 @@ import ( "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // MinimumDuration puts a minimum on user configured duration. @@ -56,6 +57,7 @@ type Config struct { Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + RuntimeImage *ocispec.Descriptor `json:",omitempty"` // Descriptor for the runtime image configuration (oci image or Docker image config) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched Entrypoint strslice.StrSlice // Entrypoint to run when starting the container diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go index 091f6caac1d6b..ad1b32dee69d5 100644 --- a/builder/builder-next/builder.go +++ b/builder/builder-next/builder.go @@ -381,7 +381,8 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder. 
if !ok { return errors.Errorf("missing image id") } - out.ImageID = id + // TODO(containerd): Use a descriptor + //out.ImageID = id return aux.Emit("moby.image.id", types.BuildResult{ID: id}) }) diff --git a/builder/builder.go b/builder/builder.go index fe3d1141c9ed4..9063c47e7e101 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -12,7 +12,6 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/containerfs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -41,9 +40,14 @@ type Backend interface { ImageBackend ExecBackend + // Resolve image resolves a ref into a descriptor + ResolveImage(context.Context, string) (ocispec.Descriptor, error) + + ResolveRuntimeConfig(context.Context, ocispec.Descriptor) ([]byte, error) + // CommitBuildStep creates a new Docker image from the config generated by // a build step. 
- CommitBuildStep(context.Context, backend.CommitConfig) (image.ID, error) + CommitBuildStep(context.Context, backend.CommitConfig) (ocispec.Descriptor, error) // ContainerCreateWorkdir creates the workdir ContainerCreateWorkdir(containerID string) error @@ -54,7 +58,7 @@ type Backend interface { // ImageBackend are the interface methods required from an image component type ImageBackend interface { - GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ROLayer, error) + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, ROLayer, error) } // ExecBackend contains the interface methods required for executing containers @@ -75,8 +79,8 @@ type ExecBackend interface { // Result is the output produced by a Builder type Result struct { - ImageID string - FromImage Image + Image ocispec.Descriptor + FromImage *ocispec.Descriptor } // ImageCacheBuilder represents a generator for stateful image cache. @@ -93,14 +97,6 @@ type ImageCache interface { GetCache(parentID string, cfg *container.Config) (imageID string, err error) } -// Image represents a Docker image used by the builder. 
-type Image interface { - ImageID() string - RunConfig() *container.Config - MarshalJSON() ([]byte, error) - OperatingSystem() string -} - // ROLayer is a reference to image rootfs layer type ROLayer interface { Release() error diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index da043a34de617..8d06cf794f5f9 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -21,7 +21,6 @@ import ( "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" @@ -273,18 +272,18 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil if err != nil { return nil, err } - if dispatchState.imageID == "" { + if dispatchState.image == nil { buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() return nil, errors.New("No image was generated. Is your Dockerfile empty?") } - return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil + return &builder.Result{Image: *dispatchState.image, FromImage: dispatchState.baseImage}, nil } func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { - if aux == nil || state.imageID == "" { + if aux == nil || state.image == nil { return nil } - return aux.Emit("", types.BuildResult{ID: state.imageID}) + return aux.Emit("", types.BuildResult{ID: state.image.Digest.String()}) } func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildArgs) error { @@ -337,7 +336,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. 
return nil, err } dispatchRequest.state.updateRunConfig() - fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortDispatchID(dispatchRequest.state)) for _, cmd := range stage.Commands { select { case <-b.clientCtx.Done(): @@ -355,7 +354,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. return nil, err } dispatchRequest.state.updateRunConfig() - fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortDispatchID(dispatchRequest.state)) } if err := emitImageID(b.Aux, dispatchRequest.state); err != nil { @@ -370,6 +369,13 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. return dispatchRequest.state, nil } +func shortDispatchID(state *dispatchState) string { + if state.image == nil { + return "" + } + return state.image.Digest.Encoded()[:12] +} + // BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile // It will: // - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. @@ -378,7 +384,8 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. // BuildFromConfig is used by the /commit endpoint, with the changes // coming from the query parameter of the same name. // -// TODO: Remove? +// TODO(containerd): Remove this context less function? 
+// At very least, add context and remove os arg func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) { if !system.IsOSSupported(os) { return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) @@ -419,10 +426,17 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co commands = append(commands, cmd) } + img, err := b.docker.ResolveImage(context.Background(), config.Image) + if err != nil { + // TODO(containerd): Resolve and wrap this error better? + return nil, errdefs.InvalidParameter(err) + } + dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, NewBuildArgs(b.options.BuildArgs), newStagesBuildResults()) // We make mutations to the configuration, ensure we have a copy dispatchRequest.state.runConfig = copyRunConfig(config) - dispatchRequest.state.imageID = config.Image + dispatchRequest.state.image = &img + // TODO(containerd): remove OS here after replaced by platform dispatchRequest.state.operatingSystem = os for _, cmd := range commands { err := dispatch(dispatchRequest, cmd) diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go index 049e4bf19a201..1f7c95ebeadc7 100644 --- a/builder/dockerfile/containerbackend.go +++ b/builder/dockerfile/containerbackend.go @@ -10,7 +10,6 @@ import ( "github.com/docker/docker/builder" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/pkg/stringid" - "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -30,17 +29,9 @@ func newContainerManager(docker builder.ExecBackend) *containerManager { } // Create a container -func (c *containerManager) Create(ctx context.Context, runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { - // note that all callers calling this function should - // only intend to 
run an intermediate container during - // the build process so that we can safely make the - // assumption of MediaTypeImageConfig type - desc := ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: digest.Digest(runConfig.Image), - } +func (c *containerManager) Create(ctx context.Context, img *ocispec.Descriptor, runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, types.ContainerCreateConfig{ - Descriptor: &desc, + Descriptor: img, Config: runConfig, HostConfig: hostConfig, }) diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go index 4fd59a7831238..0e19930debe63 100644 --- a/builder/dockerfile/dispatchers.go +++ b/builder/dockerfile/dispatchers.go @@ -17,9 +17,7 @@ import ( "github.com/containerd/containerd/platforms" "github.com/docker/docker/api" "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/builder" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/system" @@ -27,6 +25,7 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/shell" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -176,9 +175,18 @@ func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { if err != nil { return err } + var imageBytes []byte + if image != nil { + imageBytes, err = d.builder.docker.ResolveRuntimeConfig(d.builder.clientCtx, *image) + if err != nil { + return err + } + } else { + d.state.noBaseImage = true + } state := d.state - if err := state.beginStage(cmd.Name, image); err != nil { - return err + if err := 
state.beginStage(cmd.Name, image, imageBytes); err != nil { + return errors.Wrap(err, "failed to begin stage") } if len(state.runConfig.OnBuild) > 0 { triggers := state.runConfig.OnBuild @@ -231,7 +239,7 @@ func (d *dispatchRequest) getExpandedString(shlex *shell.Lex, str string) (strin return name, nil } -func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform) (builder.Image, error) { +func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform) (*ocispec.Descriptor, error) { var localOnly bool if im, ok := d.stages.getByName(name); ok { name = im.Image @@ -248,11 +256,8 @@ func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform) if platform != nil { p = *platform } - imageImage := &image.Image{} - imageImage.OS = p.OS // old windows scratch handling - // TODO: scratch should not have an os. It should be nil image. // Windows supports scratch. What is not supported is running containers // from it. if runtime.GOOS == "windows" { @@ -260,22 +265,24 @@ func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform) if !system.LCOWSupported() { return nil, errors.New("Linux containers are not supported on this system") } - imageImage.OS = "linux" } else if platform.OS == "windows" { return nil, errors.New("Windows does not support FROM scratch") } else { return nil, errors.Errorf("platform %s is not supported", platforms.Format(p)) } } - return builder.Image(imageImage), nil + + return nil, nil } + imageMount, err := d.builder.imageSources.Get(name, localOnly, platform) if err != nil { return nil, err } + return imageMount.Image(), nil } -func (d *dispatchRequest) getFromImage(shlex *shell.Lex, basename string, platform *specs.Platform) (builder.Image, error) { +func (d *dispatchRequest) getFromImage(shlex *shell.Lex, basename string, platform *specs.Platform) (*ocispec.Descriptor, error) { name, err := d.getExpandedString(shlex, basename) if err != nil { return nil, err @@ -342,9 
+349,9 @@ func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { // RUN [ "echo", "hi" ] # echo hi // func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { - if !system.IsOSSupported(d.state.operatingSystem) { - return system.ErrNotSupportedOperatingSystem - } + //if !system.IsOSSupported(d.state.operatingSystem) { + // return system.ErrNotSupportedOperatingSystem + //} stateRunConfig := d.state.runConfig cmdFromArgs, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem, c.Name(), c.String()) buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env) @@ -362,6 +369,8 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { return err } + // TODO(containerd): extract runtime config + // add to runConfig for now runConfig := copyRunConfig(stateRunConfig, withCmd(cmdFromArgs), withArgsEscaped(argsEscaped), @@ -369,7 +378,7 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { withEntrypointOverride(saveCmd, strslice.StrSlice{""}), withoutHealthcheck()) - cID, err := d.builder.create(runConfig) + cID, err := d.builder.create(d.state.image, runConfig) if err != nil { return err } diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go index fb823ff41c4f0..4f6756603e62a 100644 --- a/builder/dockerfile/dispatchers_test.go +++ b/builder/dockerfile/dispatchers_test.go @@ -3,6 +3,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "bytes" "context" + "encoding/json" "runtime" "strings" "testing" @@ -12,12 +13,13 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" 
"github.com/moby/buildkit/frontend/dockerfile/shell" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "gotest.tools/assert" is "gotest.tools/assert/cmp" ) @@ -122,17 +124,17 @@ func TestFromScratch(t *testing.T) { assert.NilError(t, err) assert.Check(t, sb.state.hasFromImage()) - assert.Check(t, is.Equal("", sb.state.imageID)) + assert.Check(t, is.Nil(sb.state.image)) expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS) assert.Check(t, is.DeepEqual([]string{expected}, sb.state.runConfig.Env)) } func TestFromWithArg(t *testing.T) { - tag, expected := ":sometag", "expectedthisid" + tag, expected := ":sometag", digest.Digest("expectedthisid") - getImage := func(name string) (builder.Image, builder.ROLayer, error) { - assert.Check(t, is.Equal("alpine"+tag, name)) - return &mockImage{id: "expectedthisid"}, nil, nil + getImage := func(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) { + assert.Check(t, is.Equal("alpine"+tag, refOrID)) + return &ocispec.Descriptor{Digest: "expectedthisid"}, nil, nil } b := newBuilderWithMockBackend() b.docker.(*MockBackend).getImageFunc = getImage @@ -153,8 +155,8 @@ func TestFromWithArg(t *testing.T) { err = initializeStage(sb, cmd) assert.NilError(t, err) - assert.Check(t, is.Equal(expected, sb.state.imageID)) - assert.Check(t, is.Equal(expected, sb.state.baseImage.ImageID())) + assert.Check(t, is.Equal(expected, sb.state.image.Digest)) + assert.Check(t, is.Equal(expected, sb.state.baseImage.Digest)) assert.Check(t, is.Len(sb.state.buildArgs.GetAllAllowed(), 0)) assert.Check(t, is.Len(sb.state.buildArgs.GetAllMeta(), 1)) } @@ -176,12 +178,13 @@ func TestFromWithArgButBuildArgsNotGiven(t *testing.T) { } func TestFromWithUndefinedArg(t *testing.T) { - tag, expected := "sometag", "expectedthisid" + tag, expected := "sometag", digest.Digest("expectedthisid") - getImage := func(name string) (builder.Image, 
builder.ROLayer, error) { - assert.Check(t, is.Equal("alpine", name)) - return &mockImage{id: "expectedthisid"}, nil, nil + getImage := func(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) { + assert.Check(t, is.Equal("alpine", refOrID)) + return &ocispec.Descriptor{Digest: "expectedthisid"}, nil, nil } + b := newBuilderWithMockBackend() b.docker.(*MockBackend).getImageFunc = getImage sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) @@ -193,7 +196,7 @@ func TestFromWithUndefinedArg(t *testing.T) { } err := initializeStage(sb, cmd) assert.NilError(t, err) - assert.Check(t, is.Equal(expected, sb.state.imageID)) + assert.Check(t, is.Equal(expected, sb.state.image.Digest)) } func TestFromMultiStageWithNamedStage(t *testing.T) { @@ -227,7 +230,7 @@ func TestOnbuild(t *testing.T) { func TestWorkdir(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - sb.state.baseImage = &mockImage{} + sb.state.baseImage = &ocispec.Descriptor{} workingDir := "/app" if runtime.GOOS == "windows" { workingDir = "C:\\app" @@ -244,7 +247,7 @@ func TestWorkdir(t *testing.T) { func TestCmd(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - sb.state.baseImage = &mockImage{} + sb.state.baseImage = &ocispec.Descriptor{} command := "./executable" cmd := &instructions.CmdCommand{ @@ -302,7 +305,7 @@ func TestHealthcheckCmd(t *testing.T) { func TestEntrypoint(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - sb.state.baseImage = &mockImage{} + sb.state.baseImage = &ocispec.Descriptor{} entrypointCmd := "/usr/sbin/nginx" cmd := &instructions.EntrypointCommand{ @@ -378,7 
+381,7 @@ func TestStopSignal(t *testing.T) { } b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - sb.state.baseImage = &mockImage{} + sb.state.baseImage = &ocispec.Descriptor{} signal := "SIGKILL" cmd := &instructions.StopSignalCommand{ @@ -463,12 +466,16 @@ func TestRunWithBuildArgs(t *testing.T) { return imageCache } b.imageProber = newImageProber(mockBackend, nil, false) - mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ROLayer, error) { - return &mockImage{ - id: "abcdef", - config: &container.Config{Cmd: origCmd}, + mockBackend.getImageFunc = func(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) { + return &ocispec.Descriptor{ + Digest: "abcdef", }, nil, nil } + mockBackend.resolveRuntimeConfigFunc = func(context.Context, ocispec.Descriptor) ([]byte, error) { + var img ocispec.Image + img.Config.Cmd = origCmd + return json.Marshal(&img) + } mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { // Check the runConfig.Cmd sent to create() assert.Check(t, is.DeepEqual(cmdWithShell, config.Config.Cmd)) @@ -476,13 +483,14 @@ func TestRunWithBuildArgs(t *testing.T) { assert.Check(t, is.DeepEqual(strslice.StrSlice{""}, config.Config.Entrypoint)) return container.ContainerCreateCreatedBody{ID: "12345"}, nil } - mockBackend.commitFunc = func(cfg backend.CommitConfig) (image.ID, error) { + mockBackend.commitFunc = func(cfg backend.CommitConfig) (ocispec.Descriptor, error) { // Check the runConfig.Cmd sent to commit() assert.Check(t, is.DeepEqual(origCmd, cfg.Config.Cmd)) assert.Check(t, is.DeepEqual(cachedCmd, cfg.ContainerConfig.Cmd)) assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Config.Entrypoint)) - return "", nil + return ocispec.Descriptor{}, nil } + from := &instructions.Stage{BaseName: "abcdef"} err := 
initializeStage(sb, from) assert.NilError(t, err) @@ -516,7 +524,7 @@ func TestRunIgnoresHealthcheck(t *testing.T) { sb := newDispatchRequest(b, '`', nil, args, newStagesBuildResults()) b.disableCommit = false - origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) + //origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) imageCache := &mockImageCache{ getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { @@ -529,17 +537,16 @@ func TestRunIgnoresHealthcheck(t *testing.T) { return imageCache } b.imageProber = newImageProber(mockBackend, nil, false) - mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ROLayer, error) { - return &mockImage{ - id: "abcdef", - config: &container.Config{Cmd: origCmd}, + mockBackend.getImageFunc = func(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) { + return &ocispec.Descriptor{ + Digest: "abcdef", }, nil, nil } mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { return container.ContainerCreateCreatedBody{ID: "12345"}, nil } - mockBackend.commitFunc = func(cfg backend.CommitConfig) (image.ID, error) { - return "", nil + mockBackend.commitFunc = func(cfg backend.CommitConfig) (ocispec.Descriptor, error) { + return ocispec.Descriptor{}, nil } from := &instructions.Stage{BaseName: "abcdef"} err := initializeStage(sb, from) diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go index 69aad74c53841..550b03df6e526 100644 --- a/builder/dockerfile/evaluator.go +++ b/builder/dockerfile/evaluator.go @@ -21,8 +21,8 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "context" + "encoding/json" "reflect" - "runtime" "strconv" "strings" @@ -33,6 +33,7 @@ import ( "github.com/docker/docker/runconfig/opts" "github.com/moby/buildkit/frontend/dockerfile/instructions" 
"github.com/moby/buildkit/frontend/dockerfile/shell" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -106,14 +107,17 @@ func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { // dispatchState is a data object which is modified by dispatchers type dispatchState struct { - ctx context.Context - runConfig *container.Config - maintainer string - cmdSet bool - imageID string - baseImage builder.Image - stageName string - buildArgs *BuildArgs + ctx context.Context + runConfig *container.Config + maintainer string + cmdSet bool + image *ocispec.Descriptor + stageName string + buildArgs *BuildArgs + baseImage *ocispec.Descriptor // How to represent scratch + noBaseImage bool + + // TODO(containerd): remove these operatingSystem string } @@ -206,29 +210,32 @@ func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Sourc } func (s *dispatchState) updateRunConfig() { - s.runConfig.Image = s.imageID + // TODO(containerd): Should this even be set in the run config? 
+ if s.image != nil { + s.runConfig.Image = s.image.Digest.String() + } } // hasFromImage returns true if the builder has processed a `FROM ` line func (s *dispatchState) hasFromImage() bool { - return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") + return s.image != nil || s.noBaseImage } -func (s *dispatchState) beginStage(stageName string, image builder.Image) error { +func (s *dispatchState) beginStage(stageName string, image *ocispec.Descriptor, config []byte) error { s.stageName = stageName - s.imageID = image.ImageID() - s.operatingSystem = image.OperatingSystem() - if s.operatingSystem == "" { // In case it isn't set - s.operatingSystem = runtime.GOOS - } - if !system.IsOSSupported(s.operatingSystem) { - return system.ErrNotSupportedOperatingSystem - } - if image.RunConfig() != nil { - // copy avoids referencing the same instance when 2 stages have the same base - s.runConfig = copyRunConfig(image.RunConfig()) - } else { + s.image = image + + if len(config) > 0 { + var c struct { + Config *container.Config `json:"config,omitempty"` + } + if err := json.Unmarshal(config, &c); err != nil { + return err + } + s.runConfig = c.Config + } + if s.runConfig == nil { s.runConfig = &container.Config{} } s.baseImage = image diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go index 165b677b1413b..5aa7cb4ecbc8c 100644 --- a/builder/dockerfile/imagecontext.go +++ b/builder/dockerfile/imagecontext.go @@ -2,31 +2,27 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "context" - "runtime" - "github.com/containerd/containerd" - "github.com/containerd/containerd/content" "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" - dockerimage "github.com/docker/docker/image" + digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" 
"github.com/sirupsen/logrus" ) -type getAndMountFunc func(string, bool, *ocispec.Platform) (builder.Image, builder.ROLayer, error) +type getAndMountFunc func(string, bool, *ocispec.Platform) (*ocispec.Descriptor, builder.ROLayer, error) // imageSources mounts images and provides a cache for mounted images. It tracks // all images so they can be unmounted at the end of the build. type imageSources struct { - byImageID map[string]*imageMount + byImageID map[digest.Digest]*imageMount mounts []*imageMount getImage getAndMountFunc } func newImageSources(ctx context.Context, options builderOptions) *imageSources { - getAndMount := func(idOrRef string, localOnly bool, platform *ocispec.Platform) (builder.Image, builder.ROLayer, error) { + getAndMount := func(idOrRef string, localOnly bool, platform *ocispec.Platform) (*ocispec.Descriptor, builder.ROLayer, error) { pullOption := backend.PullOptionNoPull if !localOnly { if options.Options.PullParent { @@ -44,14 +40,16 @@ func newImageSources(ctx context.Context, options builderOptions) *imageSources } return &imageSources{ - byImageID: make(map[string]*imageMount), + byImageID: make(map[digest.Digest]*imageMount), getImage: getAndMount, } } func (m *imageSources) Get(idOrRef string, localOnly bool, platform *ocispec.Platform) (*imageMount, error) { - if im, ok := m.byImageID[idOrRef]; ok { - return im, nil + if dgst, err := digest.Parse(idOrRef); err == nil { + if im, ok := m.byImageID[dgst]; ok { + return im, nil + } } image, layer, err := m.getImage(idOrRef, localOnly, platform) @@ -74,29 +72,30 @@ func (m *imageSources) Unmount() (retErr error) { } func (m *imageSources) Add(im *imageMount) { - switch im.image { - case nil: - // set the OS for scratch images - os := runtime.GOOS - // Windows does not support scratch except for LCOW - if runtime.GOOS == "windows" { - os = "linux" - } - im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}} - default: - m.byImageID[im.image.ImageID()] = im + if im.image != nil 
{ + m.byImageID[im.image.Digest] = im + } else { + // TODO(Containerd): Handle scratch images differently + + //// set the OS for scratch images + //os := runtime.GOOS + //// Windows does not support scratch except for LCOW + //if runtime.GOOS == "windows" { + // os = "linux" + //} + //im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}} } m.mounts = append(m.mounts, im) } // imageMount is a reference to an image that can be used as a builder.Source type imageMount struct { - image builder.Image + image *ocispec.Descriptor source builder.Source layer builder.ROLayer } -func newImageMount(image builder.Image, layer builder.ROLayer) *imageMount { +func newImageMount(image *ocispec.Descriptor, layer builder.ROLayer) *imageMount { im := &imageMount{image: image, layer: layer} return im } @@ -106,13 +105,14 @@ func (im *imageMount) unmount() error { return nil } if err := im.layer.Release(); err != nil { - return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + // TODO(containerd): cleaner output than %s + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.Digest.String()) } im.layer = nil return nil } -func (im *imageMount) Image() builder.Image { +func (im *imageMount) Image() *ocispec.Descriptor { return im.image } @@ -120,37 +120,11 @@ func (im *imageMount) NewRWLayer() (builder.RWLayer, error) { return im.layer.NewRWLayer() } +// TODO(containerd): Remove this function, always use digest func (im *imageMount) ImageID() string { - return im.image.ImageID() -} - -type containerdImage struct { - desc ocispec.Descriptor - containerdCli *containerd.Client - config *container.Config -} - -// NewContainerdImage returns a containerd image given a container config -func NewContainerdImage(desc ocispec.Descriptor, client *containerd.Client, config *container.Config) builder.Image { - return &containerdImage{desc: desc, containerdCli: client, config: config} -} - -func (ci *containerdImage) 
ImageID() string { - return ci.desc.Digest.String() -} - -func (ci *containerdImage) RunConfig() *container.Config { - return ci.config -} - -func (ci *containerdImage) OperatingSystem() string { - return ci.desc.Platform.OS -} - -func (ci *containerdImage) MarshalJSON() ([]byte, error) { - b, err := content.ReadBlob(context.Background(), ci.containerdCli.ContentStore(), ci.desc) - if err != nil { - return nil, errors.Wrap(err, "unable to read config") + var imageID string + if im.image != nil { + imageID = im.image.Digest.String() } - return b, nil + return imageID } diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index de25ef27c2d12..11cc5b2b11ea4 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -4,7 +4,6 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" // non-contiguous functionality. Please read the comments. import ( - "context" "crypto/sha256" "encoding/hex" "fmt" @@ -15,6 +14,8 @@ import ( "runtime" "strings" + cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -26,6 +27,7 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -104,14 +106,19 @@ func (b *Builder) commitContainer(dispatchState *dispatchState, id string, conta Config: copyRunConfig(dispatchState.runConfig), ContainerConfig: containerConfig, ContainerID: id, + ParentImage: dispatchState.image, } - imageID, err := b.docker.CommitBuildStep(dispatchState.ctx, commitCfg) - dispatchState.imageID = string(imageID) - return err + desc, err := b.docker.CommitBuildStep(dispatchState.ctx, commitCfg) + if err != nil { + return err + } + + 
dispatchState.image = &desc + return nil } -func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { +func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent *ocispec.Descriptor, runConfig *container.Config) error { newLayer, err := layer.Commit() if err != nil { return err @@ -121,19 +128,30 @@ func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, paren // if there is an error before we can add the full mount with image b.imageSources.Add(newImageMount(nil, newLayer)) + // TODO(containerd): commit layer, create image content, add image, register + // Deprecate use of CreateImage function or merge with CommitBuildStep config := backend.NewImageConfig{ - ParentImageID: parent.ImageID(), + ParentImage: parent, Author: state.maintainer, ContainerConfig: runConfig, Config: copyRunConfig(state.runConfig), } - exportedImage, err := b.docker.CreateImage(context.Background(), config, newLayer) + exportedImage, err := b.docker.CreateImage(state.ctx, config, newLayer) if err != nil { return errors.Wrapf(err, "failed to export image") } - state.imageID = exportedImage.Digest.String() - b.imageSources.Add(newImageMount(NewContainerdImage(exportedImage, b.containerdCli, copyRunConfig(state.runConfig)), newLayer)) + // create a dangling image in the image service + _, err = b.containerdCli.ImageService().Create(state.ctx, images.Image{ + Name: "@" + exportedImage.Digest.String(), + Target: exportedImage, + }) + if err != nil && !cerrdefs.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed to create image") + } + + state.image = &exportedImage + b.imageSources.Add(newImageMount(&exportedImage, newLayer)) return nil } @@ -156,9 +174,14 @@ func (b *Builder) performCopy(req dispatchRequest, inst copyInstruction) error { return err } - imageMount, err := b.imageSources.Get(state.imageID, true, req.builder.platform) + // TODO(containerd):get image mount by 
descriptor + var imageID string + if state.image != nil { + imageID = state.image.Digest.String() + } + imageMount, err := b.imageSources.Get(imageID, true, req.builder.platform) if err != nil { - return errors.Wrapf(err, "failed to get destination image %q", state.imageID) + return errors.Wrapf(err, "failed to get destination image %q", imageID) } rwLayer, err := imageMount.NewRWLayer() @@ -406,14 +429,22 @@ func getShell(c *container.Config, os string) []string { } func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { - cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) + // TODO(containerd): probe cache by descriptor + var imageID string + if dispatchState.image != nil { + imageID = dispatchState.image.Digest.String() + } + cachedID, err := b.imageProber.Probe(imageID, runConfig) if cachedID == "" || err != nil { return false, err } - fmt.Fprint(b.Stdout, " ---> Using cache\n") + // TODO(containerd): cache must return descriptor, return false until fixed + return false, nil + + //fmt.Fprint(b.Stdout, " ---> Using cache\n") - dispatchState.imageID = cachedID - return true, nil + //dispatchState.imageID = cachedID + //return true, nil } var defaultLogConfig = container.LogConfig{Type: "none"} @@ -422,15 +453,15 @@ func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *contai if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { return "", err } - return b.create(runConfig) + return b.create(dispatchState.image, runConfig) } -func (b *Builder) create(runConfig *container.Config) (string, error) { +func (b *Builder) create(img *ocispec.Descriptor, runConfig *container.Config) (string, error) { logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) isWCOW := runtime.GOOS == "windows" && b.platform != nil && b.platform.OS == "windows" hostConfig := hostConfigFromOptions(b.options, isWCOW) - container, err := b.containerManager.Create(b.clientCtx, 
runConfig, hostConfig) + container, err := b.containerManager.Create(b.clientCtx, img, runConfig, hostConfig) if err != nil { return "", err } diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go index de9d291681c3b..c2a4bf761d625 100644 --- a/builder/dockerfile/mockbackend_test.go +++ b/builder/dockerfile/mockbackend_test.go @@ -11,7 +11,6 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/containerfs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -19,10 +18,11 @@ import ( // MockBackend implements the builder.Backend interface for unit testing type MockBackend struct { - containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) - commitFunc func(backend.CommitConfig) (image.ID, error) - getImageFunc func(string) (builder.Image, builder.ROLayer, error) - makeImageCacheFunc func(cacheFrom []string) builder.ImageCache + containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + commitFunc func(backend.CommitConfig) (ocispec.Descriptor, error) + getImageFunc func(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) + makeImageCacheFunc func(cacheFrom []string) builder.ImageCache + resolveRuntimeConfigFunc func(ctx context.Context, desc ocispec.Descriptor) ([]byte, error) } func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error { @@ -40,11 +40,11 @@ func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) return nil } -func (m *MockBackend) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { +func (m *MockBackend) 
CommitBuildStep(ctx context.Context, c backend.CommitConfig) (ocispec.Descriptor, error) { if m.commitFunc != nil { return m.commitFunc(c) } - return "", nil + return ocispec.Descriptor{}, nil } func (m *MockBackend) ContainerKill(containerID string, sig uint64) error { @@ -67,12 +67,24 @@ func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot s return nil } -func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { +func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) { if m.getImageFunc != nil { - return m.getImageFunc(refOrID) + return m.getImageFunc(ctx, refOrID, opts) + } + + return &ocispec.Descriptor{Digest: "theid"}, &mockLayer{}, nil +} + +func (m *MockBackend) ResolveImage(context.Context, string) (ocispec.Descriptor, error) { + return ocispec.Descriptor{Digest: "theid"}, nil +} + +func (m *MockBackend) ResolveRuntimeConfig(ctx context.Context, desc ocispec.Descriptor) ([]byte, error) { + if m.resolveRuntimeConfigFunc != nil { + return m.resolveRuntimeConfigFunc(ctx, desc) } - return &mockImage{id: "theid"}, &mockLayer{}, nil + return []byte{}, nil } func (m *MockBackend) MakeImageCache(cacheFrom []string) builder.ImageCache { diff --git a/daemon/commit.go b/daemon/commit.go index 62ca39fa03301..d483704b437a5 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -7,10 +7,13 @@ import ( "strings" "time" + "github.com/containerd/containerd/images" "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/errdefs" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -163,7 +166,11 @@ func (daemon *Daemon) 
CreateImageFromContainer(ctx context.Context, name string, ContainerID: container.ID, ContainerMountLabel: container.MountLabel, ContainerOS: container.OS, - ParentImageID: string(container.ImageID), + // TODO(containerd): store full descriptor in container + ParentImage: &ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Config, + Digest: digest.Digest(container.ImageID), + }, }) if err != nil { return "", err diff --git a/daemon/create.go b/daemon/create.go index 66d588537a832..7265d4c7fa9dc 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -70,19 +70,22 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) } + if opts.params.Descriptor == nil { + if opts.params.Config.RuntimeImage != nil { + opts.params.Descriptor = opts.params.Config.RuntimeImage + } else if opts.params.Config.Image != "" { + desc, err := daemon.imageService.ResolveImage(ctx, opts.params.Config.Image) + if err != nil { + return containertypes.ContainerCreateCreatedBody{}, errors.Wrapf(err, "failed to resolve image %s", opts.params.Config.Image) + } + opts.params.Descriptor = &desc + } + } + if opts.params.Descriptor != nil { opts.rImage, err = daemon.imageService.ResolveRuntimeImage(ctx, *opts.params.Descriptor) if err != nil { - return containertypes.ContainerCreateCreatedBody{}, errors.Wrapf(err, "no runtime image found") - } - } else if opts.params.Config.Image != "" { - desc, err := daemon.imageService.ResolveImage(ctx, opts.params.Config.Image) - if err != nil { - return containertypes.ContainerCreateCreatedBody{}, errors.Wrapf(err, "failed to resolve image %s", opts.params.Config.Image) - } - opts.rImage, err = daemon.imageService.ResolveRuntimeImage(ctx, desc) - if err != nil { - return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(err) + return 
containertypes.ContainerCreateCreatedBody{}, err } } else { // TODO(containerd): move this logic to image service diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 9bd1d306cb4d6..84fb8c0b5481b 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -5,9 +5,11 @@ import ( "fmt" "sync" + "github.com/containerd/containerd" "github.com/containerd/containerd/content" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" "github.com/docker/docker/layer" digest "github.com/opencontainers/go-digest" @@ -102,9 +104,23 @@ func (i *ImageService) getCache(ctx context.Context) (c *cache, err error) { return c, nil } +// TODO(containerd): move this to separate package and replace +// "github.com/docker/docker/image/cache" implementation +type buildCache struct { + sources []string + client *containerd.Client +} + +func (bc *buildCache) GetCache(parentID string, cfg *container.Config) (imageID string, err error) { + return "", nil +} + // MakeImageCache creates a stateful image cache for build. 
func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { - return nil + return &buildCache{ + sources: sourceRefs, + client: i.client, + } /* if len(sourceRefs) == 0 { return buildcache.NewLocal(i.imageStore) diff --git a/daemon/images/image.go b/daemon/images/image.go index 9328d85905d54..d224768beab58 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -72,7 +72,7 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe return ocispec.Descriptor{}, errors.Wrap(err, "failed to lookup digest") } if len(imgs) == 0 { - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("image not find with digest")) + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("image not found with digest")) } return imgs[0].Target, nil @@ -169,6 +169,15 @@ func (i *ImageService) ResolveRuntimeImage(ctx context.Context, desc ocispec.Des return ri, nil } +// ResolveRuntimeConfig resolves the descriptor to a runtime image and returns the config +func (i *ImageService) ResolveRuntimeConfig(ctx context.Context, desc ocispec.Descriptor) ([]byte, error) { + ri, err := i.ResolveRuntimeImage(ctx, desc) + if err != nil { + return nil, err + } + return ri.ConfigBytes, nil +} + func (i *ImageService) runtimeImages(ctx context.Context, image ocispec.Descriptor) ([]RuntimeImage, error) { var ( imageMap = map[digest.Digest]RuntimeImage{} diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index 9c85e4f2a5ea7..4e83d733adcc5 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -6,25 +6,24 @@ import ( "encoding/json" "fmt" "io" - "runtime" "strings" "time" "github.com/opencontainers/image-spec/identity" "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" 
"github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/image" + "github.com/docker/docker/errdefs" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -151,11 +150,10 @@ func newROLayerForImage(chainID layer.ChainID, layerStore layer.Store) (builder. } // TODO: could this use the regular daemon PullImage ? -// TODO(containerd): don't return *image.Image type -func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform *ocispec.Platform) (*image.Image, error) { +func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform *ocispec.Platform) (ocispec.Descriptor, error) { ref, err := reference.ParseNormalizedNamed(name) if err != nil { - return nil, err + return ocispec.Descriptor{}, err } ref = reference.TagNameOnly(ref) @@ -164,7 +162,7 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf // The request came with a full auth config, use it repoInfo, err := i.registryService.ResolveRepository(ref) if err != nil { - return nil, err + return ocispec.Descriptor{}, err } resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) @@ -172,68 +170,87 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf } if err := i.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil { - return nil, err + return ocispec.Descriptor{}, err + } + + img, err := i.client.ImageService().Get(ctx, ref.String()) + if err != nil { + // TODO(containerd): 
error translation can use common function + if !cerrdefs.IsNotFound(err) { + return ocispec.Descriptor{}, err + } + return ocispec.Descriptor{}, errdefs.NotFound(errors.New("id not found")) } - return i.getDockerImage(name) + + return img.Target, nil } // GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. // Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent // leaking of layers. -func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { +func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (*ocispec.Descriptor, builder.ROLayer, error) { if refOrID == "" { // ie FROM scratch - os := runtime.GOOS + var store layer.Store if opts.Platform != nil { - os = opts.Platform.OS - } - if !system.IsOSSupported(os) { - return nil, nil, system.ErrNotSupportedOperatingSystem + var err error + store, err = i.getLayerStore(*opts.Platform) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to get layer store") + } + } else { + store = i.layerBackends[0] } - layer, err := newROLayerForImage("", i.layerStores[os]) + + layer, err := newROLayerForImage("", store) return nil, layer, err } + var rImage RuntimeImage if opts.PullOption != backend.PullOptionForcePull { desc, err := i.ResolveImage(ctx, refOrID) - if err != nil { + if err == nil { + // TODO(containerd): Use opts.Platform to resolve + rImage, err = i.ResolveRuntimeImage(ctx, desc) + if err != nil && opts.PullOption == backend.PullOptionNoPull { + return nil, nil, err + } + } else if opts.PullOption == backend.PullOptionNoPull { return nil, nil, errors.Wrapf(err, "failed to resolve image %s", refOrID) } - rImage, err := i.ResolveRuntimeImage(ctx, desc) + } + if rImage.ConfigBytes == nil { + image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, 
opts.Platform) if err != nil { - if opts.PullOption == backend.PullOptionNoPull { - return nil, nil, err - } - } else { - var img struct { - ocispec.Image - Config *container.Config `json:"config,omitempty"` - } + return nil, nil, err + } - if err := json.Unmarshal(rImage.ConfigBytes, &img); err != nil { - return nil, nil, errors.Wrap(err, "failed to unmarshal config") - } - ci := dockerfile.NewContainerdImage(rImage.Config, i.client, img.Config) - store, err := i.getLayerStore(rImage.Platform) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to get layer store") - } - layer, err := newROLayerForImage(layer.ChainID(identity.ChainID(img.RootFS.DiffIDs)), store) - if err != nil { - err = errors.Wrapf(err, "failed to get layer for image %s", refOrID) - } - return ci, layer, err + // TODO(containerd): Use opts.Platform to resolve + rImage, err = i.ResolveRuntimeImage(ctx, image) + if err != nil { + return nil, nil, err } } - image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform) + var img struct { + // RootFS references the layer content addresses used by the image. 
+ RootFS ocispec.RootFS `json:"rootfs"` + } + + if err := json.Unmarshal(rImage.ConfigBytes, &img); err != nil { + return nil, nil, errors.Wrap(err, "failed to unmarshal config") + } + + store, err := i.getLayerStore(rImage.Platform) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrapf(err, "failed to get layer store for %q", platforms.Format(rImage.Platform)) } - if !system.IsOSSupported(image.OperatingSystem()) { - return nil, nil, system.ErrNotSupportedOperatingSystem + + layer, err := newROLayerForImage(layer.ChainID(identity.ChainID(img.RootFS.DiffIDs)), store) + if err != nil { + err = errors.Wrapf(err, "failed to get layer for image %s", refOrID) } - layer, err := newROLayerForImage(image.RootFS.ChainID(), i.layerStores[image.OperatingSystem()]) - return image, layer, err + + return &rImage.Config, layer, err } // CreateImage creates a new image by adding a config and ID to the image store. @@ -262,24 +279,25 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag DockerVersion string `json:"docker_version,omitempty"` } - if newImage.ParentImageID == "" { + if newImage.ParentImage == nil { img.RootFS.Type = "layers" } else { - desc := ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageConfig, - Digest: digest.Digest(newImage.ParentImageID), - } - - b, err := content.ReadBlob(ctx, i.client.ContentStore(), desc) + ri, err := i.ResolveRuntimeImage(ctx, *newImage.ParentImage) if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "unable to read config") + return ocispec.Descriptor{}, err } - if err := json.Unmarshal(b, &img); err != nil { + if err := json.Unmarshal(ri.ConfigBytes, &img); err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal config") } } + // Get compressed layer descriptors, migrate is needed + layers, err := i.compressedLayers(ctx, img.RootFS.DiffIDs) + if err != nil { + return ocispec.Descriptor{}, err + } + // merge with new image config created := 
time.Now().UTC() img.Created = &created @@ -295,7 +313,10 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag EmptyLayer: isEmptyLayer, }) img.Author = newImage.Author - img.OS = newImage.OS + if img.OS == "" { + // TODO(containerd): why isn't this getting set to anything + img.OS = newImage.OS + } img.Config = newImage.Config img.ContainerConfig = *newImage.ContainerConfig @@ -322,8 +343,8 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag key: layerID.String(), } - if newImage.ParentImageID != "" { - labels[LabelImageParent] = newImage.ParentImageID + if newImage.ParentImage != nil { + labels[LabelImageParent] = newImage.ParentImage.Digest.String() } opts := []content.Opt{content.WithLabels(labels)} @@ -334,14 +355,52 @@ func (i *ImageService) CreateImage(ctx context.Context, newImage backend.NewImag return ocispec.Descriptor{}, errors.Wrap(err, "unable to store config") } + // Create and write manifest + m := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: desc, + Layers: layers, + } + + mb, err := json.Marshal(m) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal committed image") + } + + desc = ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Manifest, + Digest: digest.FromBytes(mb), + Size: int64(len(mb)), + } + + labels = map[string]string{ + "containerd.io/gc.ref.content.config": m.Config.Digest.String(), + } + for i, l := range m.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.l%d", i)] = l.Digest.String() + } + + opts = []content.Opt{content.WithLabels(labels)} + ref = fmt.Sprintf("manifest-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + if err := content.WriteBlob(ctx, i.client.ContentStore(), ref, 
bytes.NewReader(mb), desc, opts...); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "unable to store manifest") + } + // create a dangling image _, err = i.client.ImageService().Create(ctx, images.Image{ - Name: desc.Digest.String(), - Target: desc, - CreatedAt: created, - UpdatedAt: created, + Name: "@" + desc.Digest.String(), + Target: desc, + Labels: map[string]string{ + // TODO(containerd): Add label pointing to cache id + }, }) - if err != nil { + if err != nil && !cerrdefs.IsNotFound(err) { return ocispec.Descriptor{}, errors.Wrapf(err, "failed to create image") } diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index bb60644a36a12..88fd816432e38 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -19,7 +19,6 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" @@ -57,22 +56,19 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) DockerVersion string `json:"docker_version,omitempty"` } - if c.ParentImageID == "" { + var parentID string + if c.ParentImage == nil { img.RootFS.Type = "layers" } else { - parent := ocispec.Descriptor{ - MediaType: images.MediaTypeDockerSchema2Config, - Digest: digest.Digest(c.ParentImageID), - } - - b, err := content.ReadBlob(ctx, i.client.ContentStore(), parent) + ri, err := i.ResolveRuntimeImage(ctx, *c.ParentImage) if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "unable to read config") + return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve parent runtime image") } - if err := json.Unmarshal(b, &img); err != nil { + if err := json.Unmarshal(ri.ConfigBytes, &img); err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal config") } + parentID = 
ri.Config.Digest.String() } cl, err := i.commitLayer(ctx, identity.ChainID(img.RootFS.DiffIDs), c) @@ -137,8 +133,8 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) labels[key] = cl.layer.ChainID().String() } - if c.ParentImageID != "" { - labels[LabelImageParent] = c.ParentImageID + if parentID != "" { + labels[LabelImageParent] = parentID } opts := []content.Opt{content.WithLabels(labels)} @@ -364,7 +360,7 @@ func (i *ImageService) compressedLayers(ctx context.Context, diffs []digest.Dige return nil, err } - n, err := io.Copy(dc, rc) + _, err = io.Copy(dc, rc) rc.Close() if err != nil { return nil, err @@ -372,6 +368,12 @@ func (i *ImageService) compressedLayers(ctx context.Context, diffs []digest.Dige dc.Close() + info, err := w.Status() + if err != nil { + return nil, err + } + n := info.Offset + labels := map[string]string{ "containerd.io/uncompressed": diff.String(), } @@ -445,18 +447,13 @@ func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.R // * it doesn't log a container commit event // // This is a temporary shim. Should be removed when builder stops using commit. 
-func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { +func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (ocispec.Descriptor, error) { container := i.containers.Get(c.ContainerID) if container == nil { - // TODO: use typed error - return "", errors.Errorf("container not found: %s", c.ContainerID) + // TODO(containerd): Use typed error here other than not found + return ocispec.Descriptor{}, errors.Errorf("container not found: %s", c.ContainerID) } c.ContainerMountLabel = container.MountLabel c.ContainerOS = container.OS - c.ParentImageID = string(container.ImageID) - desc, err := i.CommitImage(ctx, c) - if err != nil { - return "", err - } - return image.ID(desc.Digest.String()), nil + return i.CommitImage(ctx, c) } diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 46833293d7bfd..d166dcc059226 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -90,18 +90,21 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference }) var ( - layers = map[digest.Digest][]ocispec.Descriptor{} - dlStatus = map[digest.Digest]bool{} - unpackDesc = map[digest.Digest]struct{}{} - unpacks int32 // how many unpacks occurred - lock = sync.Mutex{} - cond = sync.NewCond(&lock) + layers = map[digest.Digest][]ocispec.Descriptor{} + dlStatus = map[digest.Digest]bool{} + unpacks int32 // how many unpacks occurred ) grp, pctx := errgroup.WithContext(pctx) // unpackHandler handles layer unpacking concurrently as soon as // a layer has been downloaded in order unpackHandler := func(h images.Handler) images.Handler { + var ( + index bool + unpackDesc = map[digest.Digest]struct{}{} + lock = sync.Mutex{} + cond = sync.NewCond(&lock) + ) return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { children, err := h.Handle(ctx, desc) if err != nil { @@ -111,6 +114,7 @@ func (i *ImageService) 
pullImageWithReference(ctx context.Context, ref reference switch desc.MediaType { case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: lock.Lock() + index = true var unknown []ocispec.Descriptor for _, d := range children { if d.Platform == nil { @@ -125,7 +129,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference lock.Unlock() case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: lock.Lock() - if _, ok := unpackDesc[desc.Digest]; !ok { + if _, ok := unpackDesc[desc.Digest]; !ok && index { children = children[:1] } @@ -194,7 +198,12 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference return errors.Wrap(err, "failed to resolve image config for unpack") } - err = i.unpack(pctx, config, layers[config.Digest], progressOutput, nil, nil) + l, ok := layers[config.Digest] + if !ok { + return errors.Wrap(err, "no layers found to unpack") + } + + err = i.unpack(pctx, config, l, progressOutput, nil, nil) if err != nil { return errors.Wrapf(err, "failed to unpack %s", img.Target.Digest) } diff --git a/daemon/images/images.go b/daemon/images/images.go index c4a90acf85acc..53f03704bb8b6 100644 --- a/daemon/images/images.go +++ b/daemon/images/images.go @@ -359,8 +359,8 @@ func (i *ImageService) Images(ctx context.Context, imageFilters filters.Args, al // This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. // The existing image(s) is not destroyed. // If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. 
-func (i *ImageService) SquashImage(id, parent string) (string, error) { - return "", errdefs.NotImplemented(errors.New("squash not implemented")) +func (i *ImageService) SquashImage(ctx context.Context, id ocispec.Descriptor, parent *ocispec.Descriptor) (ocispec.Descriptor, error) { + return ocispec.Descriptor{}, errdefs.NotImplemented(errors.New("squash not implemented")) /* var ( From 0159b82fe0f73ea874e15faa3cf94e4abfb63a44 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 16 May 2019 17:54:36 -0700 Subject: [PATCH 68/73] Add default tag on resolve image name Signed-off-by: Derek McGowan --- daemon/images/image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/images/image.go b/daemon/images/image.go index d224768beab58..5e80703aeef78 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -108,7 +108,7 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe } return imgs[0].Target, nil } - img, err := is.Get(ctx, namedRef.String()) + img, err := is.Get(ctx, reference.TagNameOnly(namedRef).String()) if err != nil { // TODO(containerd): error translation can use common function if !cerrdefs.IsNotFound(err) { From 381eb57044ef5c8a5e33f860f0a99e285b053991 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 22 May 2019 10:37:11 -0700 Subject: [PATCH 69/73] Use containerd exporter Update load to use full image name annotation Signed-off-by: Derek McGowan --- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- daemon/images/image.go | 36 +++++++++++-------- daemon/images/image_exporter.go | 47 +++++++++++++++++++++---- daemon/images/image_load.go | 12 +++++-- 5 files changed, 73 insertions(+), 26 deletions(-) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 71866b0651729..82d9b8f59e5ce 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -31,7 +31,7 @@ type imageBackend 
interface { type importExportBackend interface { LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error ImportImage(ctx context.Context, src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error - ExportImage(names []string, outStream io.Writer) error + ExportImage(ctx context.Context, names []string, outStream io.Writer) error } type registryBackend interface { diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index d958721f2220d..2da434204a09d 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -157,7 +157,7 @@ func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r names = r.Form["names"] } - if err := s.backend.ExportImage(names, output); err != nil { + if err := s.backend.ExportImage(ctx, names, output); err != nil { if !output.Flushed() { return err } diff --git a/daemon/images/image.go b/daemon/images/image.go index 5e80703aeef78..b5fa30519d86a 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -52,10 +52,15 @@ func (e ErrImageDoesNotExist) NotFound() {} // ResolveImage searches for an image based on the given // reference or identifier. Returns the descriptor of // the image, could be manifest list, manifest, or config. 
-func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { +func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (d ocispec.Descriptor, err error) { + d, _, err = i.resolveImageName(ctx, refOrID) + return +} + +func (i *ImageService) resolveImageName(ctx context.Context, refOrID string) (ocispec.Descriptor, reference.Named, error) { parsed, err := reference.ParseAnyReference(refOrID) if err != nil { - return ocispec.Descriptor{}, errdefs.InvalidParameter(err) + return ocispec.Descriptor{}, nil, errdefs.InvalidParameter(err) } is := i.client.ImageService() @@ -64,18 +69,18 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe if !ok { digested, ok := parsed.(reference.Digested) if !ok { - return ocispec.Descriptor{}, errdefs.InvalidParameter(errors.New("bad reference")) + return ocispec.Descriptor{}, nil, errdefs.InvalidParameter(errors.New("bad reference")) } imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to lookup digest") + return ocispec.Descriptor{}, nil, errors.Wrap(err, "failed to lookup digest") } if len(imgs) == 0 { - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("image not found with digest")) + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("image not found with digest")) } - return imgs[0].Target, nil + return imgs[0].Target, nil, nil } // If the identifier could be a short ID, attempt to match @@ -86,38 +91,39 @@ func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (ocispe } imgs, err := is.List(ctx, filters...) 
if err != nil { - return ocispec.Descriptor{}, err + return ocispec.Descriptor{}, nil, err } if len(imgs) == 0 { - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("list returned no images")) + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("list returned no images")) } if len(imgs) > 1 { ref := namedRef.String() digests := map[digest.Digest]struct{}{} for _, img := range imgs { if img.Name == ref { - return img.Target, nil + return img.Target, nil, nil } digests[img.Target.Digest] = struct{}{} } if len(digests) > 1 { - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("ambiguous reference")) + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("ambiguous reference")) } } - return imgs[0].Target, nil + return imgs[0].Target, nil, nil } - img, err := is.Get(ctx, reference.TagNameOnly(namedRef).String()) + namedRef = reference.TagNameOnly(namedRef) + img, err := is.Get(ctx, namedRef.String()) if err != nil { // TODO(containerd): error translation can use common function if !cerrdefs.IsNotFound(err) { - return ocispec.Descriptor{}, err + return ocispec.Descriptor{}, nil, err } - return ocispec.Descriptor{}, errdefs.NotFound(errors.New("id not found")) + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("id not found")) } - return img.Target, nil + return img.Target, namedRef, nil } // RuntimeImage represents a platform-specific image along with the diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 6fb0931d3d9d6..7a6c0b1823bb6 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -1,9 +1,13 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "io" - "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images/archive" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // ExportImage exports a list of images 
to the given output stream. The @@ -11,10 +15,39 @@ import ( // stream. All images with the given tag and all versions containing // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. -func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { - // TODO(containerd): use containerd's archive exporter? - // This may require special logic to output the Docker format - //imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) - //return imageExporter.Save(names, outStream) - return errdefs.ErrNotImplemented +func (i *ImageService) ExportImage(ctx context.Context, names []string, w io.Writer) error { + images := map[digest.Digest]struct { + target ocispec.Descriptor + names []string + }{} + + for _, name := range names { + desc, named, err := i.resolveImageName(ctx, name) + if err != nil { + return errors.Wrapf(err, "failed to resolve %s to an image", name) + } + ei, ok := images[desc.Digest] + if !ok { + ei.target = desc + } + if named != nil { + ei.names = append(ei.names, named.String()) + } + images[desc.Digest] = ei + } + + opts := []archive.ExportOpt{ + archive.WithPlatform(i.platforms), + } + + for _, img := range images { + if len(img.names) > 0 { + opts = append(opts, archive.WithNamedManifest(img.target, img.names...)) + } else { + opts = append(opts, archive.WithManifest(img.target)) + } + } + + // Add each manifest + return archive.Export(ctx, i.client.ContentStore(), w, opts...) 
} diff --git a/daemon/images/image_load.go b/daemon/images/image_load.go index e37996c0e6d6c..6cd5842961ace 100644 --- a/daemon/images/image_load.go +++ b/daemon/images/image_load.go @@ -71,9 +71,16 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt } for _, m := range idx.Manifests { - ref := m.Annotations[ocispec.AnnotationRefName] + ref := m.Annotations[images.AnnotationImageName] if ref == "" { - log.G(ctx).Debugf("image skipped, no name for %s", m.Digest.String()) + ref = m.Annotations[ocispec.AnnotationRefName] + if ref == "" { + log.G(ctx).Debugf("image skipped, no name for %s", m.Digest.String()) + } else { + // TODO: Support OCI ref names by providing + // default repository through API + log.G(ctx).Debugf("image only containers OCI ref name %q, repository is missing for %s", ref, m.Digest.String()) + } continue } @@ -108,6 +115,7 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt } handler = images.SetChildrenLabels(cs, handler) + handler = images.FilterPlatforms(handler, i.platforms) if err := images.Walk(ctx, handler, index); err != nil { return err } From 606ccb50b6eee37a74367120716ad22a31b80e26 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 22 May 2019 10:37:20 -0700 Subject: [PATCH 70/73] Fix integration tests Prevent panics on unimplemented code and remove unnecessary lookups on commit. 
Signed-off-by: Derek McGowan --- builder/dockerfile/builder.go | 19 ++------------ daemon/commit.go | 2 +- daemon/inspect.go | 10 ++++--- daemon/list.go | 9 ++++--- daemon/prune.go | 26 +++++++++++-------- .../build/build_cgroupns_linux_test.go | 5 +++- 6 files changed, 35 insertions(+), 36 deletions(-) diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index 8d06cf794f5f9..986790a32106f 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -383,13 +383,7 @@ func shortDispatchID(state *dispatchState) string { // // BuildFromConfig is used by the /commit endpoint, with the changes // coming from the query parameter of the same name. -// -// TODO(containerd): Remove this context less function? -// At very least, add context and remove os arg -func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) { - if !system.IsOSSupported(os) { - return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) - } +func BuildFromConfig(ctx context.Context, config *container.Config, changes []string) (*container.Config, error) { if len(changes) == 0 { return config, nil } @@ -399,7 +393,7 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co return nil, errdefs.InvalidParameter(err) } - b, err := newBuilder(context.Background(), builderOptions{ + b, err := newBuilder(ctx, builderOptions{ Options: &types.ImageBuildOptions{NoCache: true}, }) if err != nil { @@ -426,18 +420,9 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co commands = append(commands, cmd) } - img, err := b.docker.ResolveImage(context.Background(), config.Image) - if err != nil { - // TODO(containerd): Resolve and wrap this error better? 
- return nil, errdefs.InvalidParameter(err) - } - dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, NewBuildArgs(b.options.BuildArgs), newStagesBuildResults()) // We make mutations to the configuration, ensure we have a copy dispatchRequest.state.runConfig = copyRunConfig(config) - dispatchRequest.state.image = &img - // TODO(containerd): remove OS here after replaced by platform - dispatchRequest.state.operatingSystem = os for _, cmd := range commands { err := dispatch(dispatchRequest, cmd) if err != nil { diff --git a/daemon/commit.go b/daemon/commit.go index d483704b437a5..e10a7eb742cc8 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -150,7 +150,7 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, if c.Config == nil { c.Config = container.Config } - newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes, container.OS) + newConfig, err := dockerfile.BuildFromConfig(ctx, c.Config, c.Changes) if err != nil { return "", err } diff --git a/daemon/inspect.go b/daemon/inspect.go index 45a21542549d8..98c806861b4ae 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -78,9 +78,13 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co container.Unlock() if size { - sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID) - base.SizeRw = &sizeRw - base.SizeRootFs = &sizeRootFs + + // TODO(containerd): context + id + layer backend must be provided + return nil, errdefs.NotImplemented(errors.New("containerd layer size not implemented")) + + //sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID) + //base.SizeRw = &sizeRw + //base.SizeRootFs = &sizeRootFs } return &types.ContainerJSON{ diff --git a/daemon/list.go b/daemon/list.go index acdb932aede03..06a16e7738bbd 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -235,9 +235,12 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list // release lock because 
size calculation is slow if ctx.Size { - sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID) - newC.SizeRw = sizeRw - newC.SizeRootFs = sizeRootFs + // TODO(containerd): context + id + layer backend must be provided + return nil, errdefs.NotImplemented(errors.New("containerd layer size not implemented")) + + //sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID) + //newC.SizeRw = sizeRw + //newC.SizeRootFs = sizeRootFs } return newC, nil } diff --git a/daemon/prune.go b/daemon/prune.go index b1bcd5b79c048..f14db0b1b3e28 100644 --- a/daemon/prune.go +++ b/daemon/prune.go @@ -71,17 +71,21 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters. if !matchLabels(pruneFilters, c.Config.Labels) { continue } - cSize, _ := daemon.imageService.GetContainerLayerSize(c.ID) - // TODO: sets RmLink to true? - err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) - if err != nil { - logrus.Warnf("failed to prune container %s: %v", c.ID, err) - continue - } - if cSize > 0 { - rep.SpaceReclaimed += uint64(cSize) - } - rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) + + // TODO(containerd): context + id + layer backend must be provided + return nil, errdefs.NotImplemented(errors.New("containerd layer size not implemented")) + + //cSize, _ := daemon.imageService.GetContainerLayerSize(c.ID) + //// TODO: sets RmLink to true? 
+ //err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + //if err != nil { + // logrus.Warnf("failed to prune container %s: %v", c.ID, err) + // continue + //} + //if cSize > 0 { + // rep.SpaceReclaimed += uint64(cSize) + //} + //rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) } } diff --git a/integration/build/build_cgroupns_linux_test.go b/integration/build/build_cgroupns_linux_test.go index 258a1ec97972c..fe3a9daf32da6 100644 --- a/integration/build/build_cgroupns_linux_test.go +++ b/integration/build/build_cgroupns_linux_test.go @@ -30,6 +30,9 @@ func getCgroupFromBuildOutput(buildOutput io.Reader) (string, error) { if err != nil { return "", err } + if m.Error != nil { + return "", m.Error + } if ix := strings.Index(m.Stream, prefix); ix == 0 { return strings.TrimSpace(m.Stream), nil } @@ -88,5 +91,5 @@ func TestCgroupNamespacesBuildDaemonHostMode(t *testing.T) { // When the daemon defaults to host cgroup namespaces, containers // launched should not be inside their own cgroup namespaces containerCgroup, daemonCgroup := testBuildWithCgroupNs(t, "host") - assert.Assert(t, daemonCgroup == containerCgroup) + assert.Assert(t, daemonCgroup == containerCgroup, "Different cgroups, daemon=%q, container=%q", daemonCgroup, containerCgroup) } From e39a035708e1fcde5161a4346d69203b53b6468f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 23 May 2019 16:44:45 -0700 Subject: [PATCH 71/73] Remove unnecessary layer store OS check Signed-off-by: Derek McGowan --- layer/layer_store.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/layer/layer_store.go b/layer/layer_store.go index 7b8c011f4c07c..ecc6f9cd0a50f 100644 --- a/layer/layer_store.go +++ b/layer/layer_store.go @@ -15,7 +15,6 @@ import ( "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" 
"github.com/vbatts/tar-split/tar/asm" @@ -81,9 +80,6 @@ func NewStoreFromOptions(options StoreOptions) (Store, error) { // metadata store and graph driver. The metadata store will be used to restore // the Store. func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { - if !system.IsOSSupported(os) { - return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) - } caps := graphdriver.Capabilities{} if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { caps = capDriver.Capabilities() From c5df7686159481e3e9b3923c6151f7c156aa3dbb Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 20 Aug 2019 11:29:52 -0700 Subject: [PATCH 72/73] Update containerd vendor Signed-off-by: Derek McGowan --- daemon/images/image_exporter.go | 6 +- vendor.conf | 16 +- vendor/github.com/containerd/cgroups/blkio.go | 1 + .../github.com/containerd/cgroups/cgroup.go | 3 + .../github.com/containerd/cgroups/memory.go | 4 + .../containerd/cgroups/metrics.pb.go | 654 +++++-- .../containerd/cgroups/metrics.proto | 13 + vendor/github.com/containerd/cgroups/utils.go | 2 +- .../containerd/containerd/README.md | 2 +- .../api/services/diff/v1/diff.pb.go | 266 ++- .../api/services/diff/v1/diff.proto | 3 + .../introspection/v1/introspection.pb.go | 284 ++- .../introspection/v1/introspection.proto | 7 + .../api/services/leases/v1/leases.pb.go | 1618 ++++++++++++++--- .../api/services/leases/v1/leases.proto | 38 + .../archive/compression/compression.go | 2 +- .../containerd/archive/time_unix.go | 2 +- .../containerd/containerd/cio/io.go | 29 +- .../containerd/containerd/client.go | 146 +- .../containerd/containerd/container.go | 36 +- .../containerd/containerd/container_opts.go | 77 +- .../containerd/container_opts_unix.go | 15 +- .../containerd/containerd/content/helpers.go | 31 +- .../containerd/content/local/store.go | 16 +- .../contrib/seccomp/seccomp_default.go | 7 +- .../containerd/defaults/defaults.go 
| 4 +- .../containerd/defaults/defaults_unix.go | 2 + .../containerd/defaults/defaults_windows.go | 2 + .../github.com/containerd/containerd/diff.go | 13 +- .../containerd/containerd/diff/apply/apply.go | 84 +- .../containerd/containerd/diff/diff.go | 20 +- .../containerd/containerd/diff/stream.go | 187 ++ .../containerd/containerd/diff/stream_unix.go | 146 ++ .../containerd/diff/stream_windows.go | 165 ++ .../containerd/diff/walking/differ.go | 9 +- .../containerd/containerd/errdefs/errors.go | 17 +- .../containerd/containerd/errdefs/grpc.go | 9 + .../containerd/containerd/export.go | 26 +- .../github.com/containerd/containerd/gc/gc.go | 7 + .../github.com/containerd/containerd/image.go | 42 +- .../images/annotations.go} | 8 +- .../containerd/images/archive/exporter.go | 468 +++++ .../containerd/images/archive/importer.go | 152 +- .../containerd/images/archive/reference.go | 30 +- .../containerd/containerd/images/image.go | 79 +- .../containerd/images/mediatypes.go | 2 + .../containerd/images/oci/exporter.go | 241 --- .../containerd/containerd/import.go | 47 +- .../containerd/containerd/leases/lease.go | 10 + .../containerd/leases/proxy/manager.go | 40 + .../containerd/containerd/metadata/content.go | 4 +- .../containerd/containerd/metadata/gc.go | 39 +- .../containerd/containerd/metadata/leases.go | 157 ++ .../containerd/metadata/namespaces.go | 10 +- .../containerd/metadata/snapshot.go | 39 +- .../containerd/containerd/namespaces.go | 16 +- .../containerd/namespaces/context.go | 12 +- .../containerd/containerd/namespaces/store.go | 11 +- .../containerd/containerd/namespaces/ttrpc.go | 51 + .../process.go => namespaces_opts_linux.go} | 34 +- .../containerd/containerd/oci/spec_opts.go | 50 + .../containerd/oci/spec_opts_linux.go | 64 + .../containerd/oci/spec_opts_unix.go | 63 + .../containerd/oci/spec_opts_windows.go | 5 + .../containerd/pkg/dialer/dialer.go | 14 +- .../proc => pkg/process}/deleted_state.go | 5 +- .../v1/linux/proc => pkg/process}/exec.go | 26 +- 
.../linux/proc => pkg/process}/exec_state.go | 2 +- .../v1/linux/proc => pkg/process}/init.go | 40 +- .../linux/proc => pkg/process}/init_state.go | 42 +- .../v1/linux/proc => pkg/process}/io.go | 28 +- .../proc/proc.go => pkg/process/process.go} | 28 +- .../v1/linux/proc => pkg/process}/types.go | 2 +- .../v1/linux/proc => pkg/process}/utils.go | 56 +- .../pkg/stdio/platform.go} | 21 +- .../pkg/stdio/stdio.go} | 20 +- .../containerd/platforms/compare.go | 37 + .../containerd/platforms/cpuinfo.go | 2 +- .../containerd/containerd/plugin/plugin.go | 49 +- .../github.com/containerd/containerd/pull.go | 2 +- .../containerd/remotes/docker/authorizer.go | 299 ++- .../containerd/remotes/docker/fetcher.go | 133 +- .../containerd/remotes/docker/handler.go | 42 + .../containerd/remotes/docker/pusher.go | 189 +- .../containerd/remotes/docker/registry.go | 202 ++ .../containerd/remotes/docker/resolver.go | 482 +++-- .../remotes/docker/schema1/converter.go | 4 +- .../containerd/remotes/docker/scope.go | 43 +- .../containerd/containerd/remotes/handlers.go | 55 +- .../containerd/containerd/rootfs/apply.go | 26 +- .../containerd/containerd/runtime/task.go | 3 + .../containerd/runtime/v1/linux/bundle.go | 7 + .../containerd/runtime/v1/linux/runtime.go | 69 +- .../containerd/runtime/v1/linux/task.go | 13 +- .../runtime/v1/shim/client/client.go | 35 +- .../containerd/runtime/v1/shim/service.go | 48 +- .../containerd/services/diff/local.go | 21 +- .../containerd/services/images/helpers.go | 14 +- .../containerd/services/leases/local.go | 24 + .../containerd/services/leases/service.go | 50 + .../services/server/config/config.go | 95 +- .../containerd/services/server/server.go | 142 +- .../services/server/server_linux.go | 5 + .../services/server/server_solaris.go | 2 +- .../services/server/server_unsupported.go | 5 + .../services/server/server_windows.go | 5 + .../containerd/snapshots/snapshotter.go | 13 +- .../containerd/containerd/task_opts_unix.go | 26 + 
.../containerd/containerd/vendor.conf | 42 +- .../containerd/continuity/context.go | 673 ------- .../containerd/continuity/digests.go | 104 -- .../containerd/continuity/groups_unix.go | 129 -- .../containerd/continuity/hardlinks.go | 73 - .../containerd/continuity/hardlinks_unix.go | 52 - .../containerd/continuity/ioutils.go | 63 - .../containerd/continuity/manifest.go | 160 -- .../continuity/proto/manifest.pb.go | 181 -- .../continuity/proto/manifest.proto | 97 - .../containerd/continuity/resource.go | 590 ------ .../containerd/continuity/resource_unix.go | 53 - vendor/github.com/containerd/fifo/raw.go | 116 -- vendor/github.com/containerd/fifo/readme.md | 12 - vendor/github.com/containerd/go-runc/runc.go | 12 +- vendor/github.com/containerd/ttrpc/channel.go | 5 +- vendor/github.com/containerd/ttrpc/client.go | 207 ++- vendor/github.com/containerd/ttrpc/config.go | 15 +- .../containerd/ttrpc/interceptor.go | 50 + .../github.com/containerd/ttrpc/metadata.go | 107 ++ vendor/github.com/containerd/ttrpc/server.go | 22 +- .../github.com/containerd/ttrpc/services.go | 14 +- vendor/github.com/containerd/ttrpc/types.go | 28 +- vendor/github.com/containerd/typeurl/LICENSE | 18 +- .../github.com/containerd/typeurl/README.md | 10 - vendor/github.com/containerd/typeurl/doc.go | 83 - vendor/github.com/containerd/typeurl/types.go | 5 +- .../moby/buildkit/util/leaseutil/manager.go | 37 +- 136 files changed, 6944 insertions(+), 4083 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/diff/stream.go create mode 100644 vendor/github.com/containerd/containerd/diff/stream_unix.go create mode 100644 vendor/github.com/containerd/containerd/diff/stream_windows.go rename vendor/github.com/containerd/{continuity/proto/gen.go => containerd/images/annotations.go} (73%) create mode 100644 vendor/github.com/containerd/containerd/images/archive/exporter.go delete mode 100644 vendor/github.com/containerd/containerd/images/oci/exporter.go create mode 100644 
vendor/github.com/containerd/containerd/namespaces/ttrpc.go rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc/process.go => namespaces_opts_linux.go} (54%) create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_linux.go create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_unix.go rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/deleted_state.go (95%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/exec.go (91%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/exec_state.go (99%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/init.go (95%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/init_state.go (92%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/io.go (89%) rename vendor/github.com/containerd/containerd/{runtime/proc/proc.go => pkg/process/process.go} (70%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/types.go (99%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/utils.go (70%) rename vendor/github.com/containerd/{continuity/hardlinks_windows.go => containerd/pkg/stdio/platform.go} (58%) rename vendor/github.com/containerd/{continuity/resource_windows.go => containerd/pkg/stdio/stdio.go} (68%) create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/registry.go delete mode 100644 vendor/github.com/containerd/continuity/context.go delete mode 100644 vendor/github.com/containerd/continuity/digests.go delete mode 100644 vendor/github.com/containerd/continuity/groups_unix.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks.go delete mode 100644 vendor/github.com/containerd/continuity/hardlinks_unix.go delete mode 100644 
vendor/github.com/containerd/continuity/ioutils.go delete mode 100644 vendor/github.com/containerd/continuity/manifest.go delete mode 100644 vendor/github.com/containerd/continuity/proto/manifest.pb.go delete mode 100644 vendor/github.com/containerd/continuity/proto/manifest.proto delete mode 100644 vendor/github.com/containerd/continuity/resource.go delete mode 100644 vendor/github.com/containerd/continuity/resource_unix.go delete mode 100644 vendor/github.com/containerd/fifo/raw.go create mode 100644 vendor/github.com/containerd/ttrpc/interceptor.go create mode 100644 vendor/github.com/containerd/ttrpc/metadata.go delete mode 100644 vendor/github.com/containerd/typeurl/doc.go diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 7a6c0b1823bb6..e792f15d4f02d 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -41,11 +41,7 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, w io.Wri } for _, img := range images { - if len(img.names) > 0 { - opts = append(opts, archive.WithNamedManifest(img.target, img.names...)) - } else { - opts = append(opts, archive.WithManifest(img.target)) - } + opts = append(opts, archive.WithManifest(img.target, img.names...)) } // Add each manifest diff --git a/vendor.conf b/vendor.conf index 5a0ceb33dab02..064426acf424d 100644 --- a/vendor.conf +++ b/vendor.conf @@ -27,7 +27,7 @@ github.com/imdario/mergo 7c29201646fa3de8506f70121347 golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c # buildkit -github.com/moby/buildkit c24275065aca6605bd83c57c6735510f4ebeb6d9 +github.com/moby/buildkit 23c4e9762fbce7f3cd03142ad99e7c5026248fe8 https://github.com/dmcgowan/buildkit.git github.com/tonistiigi/fsutil 3bbb99cdbd76619ab717299830c60f6f2a533a6b github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 @@ -119,14 +119,14 @@ github.com/googleapis/gax-go 
317e0006254c44a0ac427cc52a0e google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 # containerd -github.com/containerd/containerd 7c1e88399ec0b0b077121d9d5ad97e647b11c870 -github.com/containerd/fifo a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c -github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7 -github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 +github.com/containerd/containerd f06e605f1aef6150b5b4d4556e5b84eeb758fb51 #v1.3.0-beta.1 +github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c +github.com/containerd/continuity f2a389ac0a02ce21c09edd7344677a601970f41c +github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9 github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f -github.com/containerd/go-runc 7d11b49dc0769f6dbb0d1b19f3d48524d1bad9ad -github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d -github.com/containerd/ttrpc 699c4e40d1e7416e08bf7019c7ce2e9beced4636 +github.com/containerd/go-runc 9007c2405372fe28918845901a3276c0915689a1 +github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 +github.com/containerd/ttrpc 1fb3814edf44a76e0ccf503decf726d994919a9a github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0 # cluster diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go index 875fb55465993..7c498def65b1a 100644 --- a/vendor/github.com/containerd/cgroups/blkio.go +++ b/vendor/github.com/containerd/cgroups/blkio.go @@ -86,6 +86,7 @@ func (b *blkioController) Stat(path string, stats *Metrics) error { } // Try to read CFQ stats available on all CFQ enabled kernels first if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil { + settings = []blkioStatSettings{} settings = append(settings, blkioStatSettings{ name: "sectors_recursive", diff --git a/vendor/github.com/containerd/cgroups/cgroup.go 
b/vendor/github.com/containerd/cgroups/cgroup.go index e3ef076519d52..53866685b36c2 100644 --- a/vendor/github.com/containerd/cgroups/cgroup.go +++ b/vendor/github.com/containerd/cgroups/cgroup.go @@ -497,6 +497,9 @@ func (c *cgroup) MoveTo(destination Cgroup) error { } for _, p := range processes { if err := destination.Add(p); err != nil { + if strings.Contains(err.Error(), "no such process") { + continue + } return err } } diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go index ce15ca2b9a336..b8b5fe7ea0e86 100644 --- a/vendor/github.com/containerd/cgroups/memory.go +++ b/vendor/github.com/containerd/cgroups/memory.go @@ -281,6 +281,10 @@ func getMemorySettings(resources *specs.LinuxResources) []memorySettings { name: "limit_in_bytes", value: mem.Limit, }, + { + name: "soft_limit_in_bytes", + value: mem.Reservation, + }, { name: "memsw.limit_in_bytes", value: mem.Swap, diff --git a/vendor/github.com/containerd/cgroups/metrics.pb.go b/vendor/github.com/containerd/cgroups/metrics.pb.go index 6043a8f7db02b..7dd7f6f3c4061 100644 --- a/vendor/github.com/containerd/cgroups/metrics.pb.go +++ b/vendor/github.com/containerd/cgroups/metrics.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/cgroups/metrics.proto -// DO NOT EDIT! /* Package cgroups is a generated protocol buffer package. 
@@ -21,6 +20,7 @@ BlkIOEntry RdmaStat RdmaEntry + NetworkStat */ package cgroups @@ -52,6 +52,7 @@ type Metrics struct { Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"` Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"` Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network" json:"network,omitempty"` } func (m *Metrics) Reset() { *m = Metrics{} } @@ -209,6 +210,22 @@ func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } func (*RdmaEntry) ProtoMessage() {} func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} } +type NetworkStat struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` +} + +func (m *NetworkStat) Reset() { *m = NetworkStat{} } +func (*NetworkStat) ProtoMessage() {} +func (*NetworkStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{12} } + func init() { proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") proto.RegisterType((*HugetlbStat)(nil), 
"io.containerd.cgroups.v1.HugetlbStat") @@ -222,6 +239,7 @@ func init() { proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") + proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") } func (m *Metrics) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -300,6 +318,18 @@ func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { } i += n5 } + if len(m.Network) > 0 { + for _, msg := range m.Network { + dAtA[i] = 0x3a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -389,21 +419,21 @@ func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n5, err := m.Usage.MarshalTo(dAtA[i:]) + n6, err := m.Usage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 } if m.Throttling != nil { dAtA[i] = 0x12 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size())) - n6, err := m.Throttling.MarshalTo(dAtA[i:]) + n7, err := m.Throttling.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } return i, nil } @@ -439,21 +469,21 @@ func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintMetrics(dAtA, i, uint64(m.User)) } if len(m.PerCPU) > 0 { - dAtA8 := make([]byte, len(m.PerCPU)*10) - var j7 int + dAtA9 := make([]byte, len(m.PerCPU)*10) + var j8 int for _, num := range m.PerCPU { for num >= 1<<7 { - dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80) + dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j7++ + j8++ } - dAtA8[j7] = uint8(num) - j7++ + dAtA9[j8] = uint8(num) + j8++ } dAtA[i] = 0x22 i++ - i = encodeVarintMetrics(dAtA, i, uint64(j7)) - i += copy(dAtA[i:], dAtA8[:j7]) + i = encodeVarintMetrics(dAtA, i, uint64(j8)) + i += 
copy(dAtA[i:], dAtA9[:j8]) } return i, nil } @@ -706,11 +736,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n9, err := m.Usage.MarshalTo(dAtA[i:]) + n10, err := m.Usage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } if m.Swap != nil { dAtA[i] = 0x92 @@ -718,11 +748,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size())) - n10, err := m.Swap.MarshalTo(dAtA[i:]) + n11, err := m.Swap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } if m.Kernel != nil { dAtA[i] = 0x9a @@ -730,11 +760,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size())) - n11, err := m.Kernel.MarshalTo(dAtA[i:]) + n12, err := m.Kernel.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } if m.KernelTCP != nil { dAtA[i] = 0xa2 @@ -742,11 +772,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size())) - n12, err := m.KernelTCP.MarshalTo(dAtA[i:]) + n13, err := m.KernelTCP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } return i, nil } @@ -766,7 +796,6 @@ func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { dAtA[i] = 0x8 i++ @@ -1025,24 +1054,70 @@ func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 +func (m *NetworkStat) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 + +func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.RxBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + } + if m.TxPackets != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + } + return i, nil } + func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1081,6 +1156,12 @@ func (m *Metrics) Size() (n int) { l = m.Rdma.Size() n += 1 + l + sovMetrics(uint64(l)) } + if len(m.Network) > 0 { + for _, e := range m.Network { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } return n } @@ -1413,6 +1494,40 @@ func (m *RdmaEntry) Size() (n int) { return n } +func (m *NetworkStat) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n 
+= 1 + l + sovMetrics(uint64(l)) + } + if m.RxBytes != 0 { + n += 1 + sovMetrics(uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + n += 1 + sovMetrics(uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + n += 1 + sovMetrics(uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + n += 1 + sovMetrics(uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + n += 1 + sovMetrics(uint64(m.TxBytes)) + } + if m.TxPackets != 0 { + n += 1 + sovMetrics(uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + n += 1 + sovMetrics(uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + n += 1 + sovMetrics(uint64(m.TxDropped)) + } + return n +} + func sovMetrics(x uint64) (n int) { for { n++ @@ -1437,6 +1552,7 @@ func (this *Metrics) String() string { `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`, `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`, `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "NetworkStat", "NetworkStat", 1) + `,`, `}`, }, "") return s @@ -1613,6 +1729,24 @@ func (this *RdmaEntry) String() string { }, "") return s } +func (this *NetworkStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkStat{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, + `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, + `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, + `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, + `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, + `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, + `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, + `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, + `}`, + }, "") + return s +} func valueToStringMetrics(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1624,7 +1758,6 @@ func 
valueToStringMetrics(v interface{}) string { func (m *Metrics) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 - for iNdEx < l { preIndex := iNdEx var wire uint64 @@ -1847,6 +1980,37 @@ func (m *Metrics) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = append(m.Network, &NetworkStat{}) + if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -4092,7 +4256,237 @@ func (m *RdmaEntry) Unmarshal(dAtA []byte) error { } return nil } +func (m *NetworkStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + m.RxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) + } + m.RxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxPackets |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + m.RxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxErrors |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) + } + m.RxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxDropped |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + m.TxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) + } + m.TxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxPackets |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + m.TxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxErrors |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) + } + m.TxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxDropped |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipMetrics(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -4201,88 +4595,102 @@ var ( func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", 
fileDescriptorMetrics) } var fileDescriptorMetrics = []byte{ - // 1325 bytes of a gzipped FileDescriptorProto + // 1549 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x6f, 0x1b, 0xb7, - 0x16, 0x8d, 0xac, 0xb1, 0x3e, 0xae, 0x6c, 0xc7, 0xa6, 0x13, 0x67, 0xec, 0x97, 0x27, 0x29, 0xb2, - 0xfd, 0x9e, 0x5b, 0x03, 0x32, 0x9a, 0x02, 0x41, 0x93, 0xa6, 0x28, 0x22, 0xb7, 0x41, 0x83, 0xd6, - 0x88, 0x32, 0xb2, 0x91, 0x76, 0x35, 0x18, 0x8d, 0x98, 0x31, 0xe3, 0xd1, 0x70, 0xc2, 0xe1, 0xc8, - 0x71, 0x57, 0xdd, 0xf5, 0x37, 0xf5, 0x1f, 0x64, 0xd9, 0x4d, 0x81, 0x76, 0x63, 0x34, 0xfa, 0x25, - 0x05, 0x2f, 0xe7, 0x4b, 0x49, 0xdc, 0x40, 0xbb, 0xb9, 0xbc, 0xe7, 0x1c, 0x5e, 0x5e, 0x1e, 0x8a, - 0x14, 0xec, 0x7b, 0x4c, 0x9e, 0xc6, 0xc3, 0xae, 0xcb, 0xc7, 0x07, 0x2e, 0x0f, 0xa4, 0xc3, 0x02, - 0x2a, 0x46, 0x07, 0xae, 0x27, 0x78, 0x1c, 0x46, 0x07, 0x63, 0x2a, 0x05, 0x73, 0xa3, 0x6e, 0x28, - 0xb8, 0xe4, 0xc4, 0x64, 0xbc, 0x9b, 0x83, 0xba, 0x09, 0xa8, 0x3b, 0xf9, 0x6c, 0xeb, 0x86, 0xc7, - 0x3d, 0x8e, 0xa0, 0x03, 0xf5, 0xa5, 0xf1, 0x9d, 0xdf, 0x16, 0xa0, 0x7a, 0xa4, 0x15, 0xc8, 0xd7, - 0x50, 0x3d, 0x8d, 0x3d, 0x2a, 0xfd, 0xa1, 0x59, 0x6a, 0x97, 0xf7, 0x1a, 0x77, 0x77, 0xbb, 0x57, - 0xa9, 0x75, 0xbf, 0xd3, 0xc0, 0x81, 0x74, 0xa4, 0x95, 0xb2, 0xc8, 0x3d, 0x30, 0x42, 0x36, 0x8a, - 0xcc, 0x85, 0x76, 0x69, 0xaf, 0x71, 0xb7, 0x73, 0x35, 0xbb, 0xcf, 0x46, 0x11, 0x52, 0x11, 0x4f, - 0x1e, 0x42, 0xd9, 0x0d, 0x63, 0xb3, 0x8c, 0xb4, 0x3b, 0x57, 0xd3, 0x0e, 0xfb, 0x27, 0x8a, 0xd5, - 0xab, 0x4e, 0x2f, 0x5b, 0xe5, 0xc3, 0xfe, 0x89, 0xa5, 0x68, 0xe4, 0x21, 0x54, 0xc6, 0x74, 0xcc, - 0xc5, 0x85, 0x69, 0xa0, 0xc0, 0xce, 0xd5, 0x02, 0x47, 0x88, 0xc3, 0x99, 0x13, 0x0e, 0xb9, 0x0f, - 0x8b, 0x43, 0xff, 0x8c, 0x71, 0x73, 0x11, 0xc9, 0xdb, 0x57, 0x93, 0x7b, 0xfe, 0xd9, 0x93, 0xa7, - 0xc8, 0xd5, 0x8c, 0xce, 0x19, 0x34, 0x0a, 0x6d, 0x20, 0x37, 0x60, 0x31, 0x8e, 0x1c, 0x8f, 0x9a, - 0xa5, 0x76, 0x69, 0xcf, 0xb0, 0x74, 0x40, 0x56, 0xa1, 0x3c, 0x76, 0x5e, 
0x63, 0x4b, 0x0c, 0x4b, - 0x7d, 0x12, 0x13, 0xaa, 0x2f, 0x1c, 0xe6, 0xbb, 0x81, 0xc4, 0x15, 0x1b, 0x56, 0x1a, 0x92, 0x2d, - 0xa8, 0x85, 0x8e, 0x47, 0x23, 0xf6, 0x33, 0xc5, 0xb5, 0xd4, 0xad, 0x2c, 0xee, 0x3c, 0x80, 0x5a, - 0xda, 0x35, 0xa5, 0xe0, 0xc6, 0x42, 0xd0, 0x40, 0x26, 0x73, 0xa5, 0xa1, 0xaa, 0xc1, 0x67, 0x63, - 0x26, 0x93, 0xf9, 0x74, 0xd0, 0xf9, 0xb5, 0x04, 0xd5, 0xa4, 0x77, 0xe4, 0x8b, 0x62, 0x95, 0xff, - 0xba, 0x49, 0x87, 0xfd, 0x93, 0x13, 0x85, 0x4c, 0x57, 0xd2, 0x03, 0x90, 0xa7, 0x82, 0x4b, 0xe9, - 0xb3, 0xc0, 0xfb, 0xf8, 0x1e, 0x1f, 0x6b, 0x2c, 0xb5, 0x0a, 0xac, 0xce, 0x2b, 0xa8, 0xa5, 0xb2, - 0xaa, 0x56, 0xc9, 0xa5, 0xe3, 0xa7, 0xfd, 0xc2, 0x80, 0x6c, 0x40, 0xe5, 0x8c, 0x8a, 0x80, 0xfa, - 0xc9, 0x12, 0x92, 0x88, 0x10, 0x30, 0xe2, 0x88, 0x8a, 0xa4, 0x65, 0xf8, 0x4d, 0xb6, 0xa1, 0x1a, - 0x52, 0x61, 0x2b, 0xef, 0x18, 0xed, 0xf2, 0x9e, 0xd1, 0x83, 0xe9, 0x65, 0xab, 0xd2, 0xa7, 0x42, - 0x79, 0xa3, 0x12, 0x52, 0x71, 0x18, 0xc6, 0x9d, 0xd7, 0x50, 0x4b, 0x4b, 0x51, 0x8d, 0x0b, 0xa9, - 0x60, 0x7c, 0x14, 0xa5, 0x8d, 0x4b, 0x42, 0xb2, 0x0f, 0x6b, 0x49, 0x99, 0x74, 0x64, 0xa7, 0x18, - 0x5d, 0xc1, 0x6a, 0x96, 0xe8, 0x27, 0xe0, 0x5d, 0x58, 0xc9, 0xc1, 0x92, 0x8d, 0x69, 0x52, 0xd5, - 0x72, 0x36, 0x7a, 0xcc, 0xc6, 0xb4, 0xf3, 0x57, 0x03, 0x20, 0x77, 0x9c, 0x5a, 0xaf, 0xeb, 0xb8, - 0xa7, 0x99, 0x3f, 0x30, 0x20, 0x9b, 0x50, 0x16, 0x51, 0x32, 0x95, 0x36, 0xb6, 0x35, 0x18, 0x58, - 0x6a, 0x8c, 0xfc, 0x0f, 0x6a, 0x22, 0x8a, 0x6c, 0x75, 0xba, 0xf4, 0x04, 0xbd, 0xc6, 0xf4, 0xb2, - 0x55, 0xb5, 0x06, 0x03, 0x65, 0x3b, 0xab, 0x2a, 0xa2, 0x48, 0x7d, 0x90, 0x16, 0x34, 0xc6, 0x4e, - 0x18, 0xd2, 0x91, 0xfd, 0x82, 0xf9, 0xda, 0x39, 0x86, 0x05, 0x7a, 0xe8, 0x31, 0xf3, 0xb1, 0xd3, - 0x23, 0x26, 0xe4, 0x05, 0x7a, 0xdc, 0xb0, 0x74, 0x40, 0x6e, 0x43, 0xfd, 0x5c, 0x30, 0x49, 0x87, - 0x8e, 0x7b, 0x66, 0x56, 0x30, 0x93, 0x0f, 0x10, 0x13, 0x6a, 0xa1, 0x67, 0x87, 0x9e, 0xcd, 0x02, - 0xb3, 0xaa, 0x77, 0x22, 0xf4, 0xfa, 0xde, 0x93, 0x80, 0x6c, 0x41, 0x5d, 0x67, 0x78, 0x2c, 0xcd, - 0x5a, 0xd2, 
0x46, 0xaf, 0xef, 0x3d, 0x8d, 0x25, 0xd9, 0x44, 0xd6, 0x0b, 0x27, 0xf6, 0xa5, 0x59, - 0x4f, 0x53, 0x8f, 0x55, 0x48, 0xda, 0xb0, 0x14, 0x7a, 0xf6, 0xd8, 0x79, 0x99, 0xa4, 0x41, 0x97, - 0x19, 0x7a, 0x47, 0xce, 0x4b, 0x8d, 0xd8, 0x86, 0x65, 0x16, 0x38, 0xae, 0x64, 0x13, 0x6a, 0x3b, - 0x01, 0x0f, 0xcc, 0x06, 0x42, 0x96, 0xd2, 0xc1, 0x47, 0x01, 0x0f, 0xd4, 0x62, 0x8b, 0x90, 0x25, - 0xad, 0x52, 0x00, 0x14, 0x55, 0xb0, 0x1f, 0xcb, 0xb3, 0x2a, 0xd8, 0x91, 0x5c, 0x05, 0x21, 0x2b, - 0x45, 0x15, 0x04, 0xb4, 0xa1, 0x11, 0x07, 0x74, 0xc2, 0x5c, 0xe9, 0x0c, 0x7d, 0x6a, 0x5e, 0x47, - 0x40, 0x71, 0x88, 0x3c, 0x80, 0xcd, 0x53, 0x46, 0x85, 0x23, 0xdc, 0x53, 0xe6, 0x3a, 0xbe, 0xad, - 0x7f, 0x4f, 0x6c, 0x7d, 0xfc, 0x56, 0x11, 0x7f, 0xab, 0x08, 0xd0, 0x4e, 0xf8, 0x41, 0xa5, 0xc9, - 0x3d, 0x98, 0x49, 0xd9, 0xd1, 0xb9, 0x13, 0x26, 0xcc, 0x35, 0x64, 0xde, 0x2c, 0xa6, 0x07, 0xe7, - 0x4e, 0xa8, 0x79, 0x2d, 0x68, 0xe0, 0x29, 0xb1, 0xb5, 0x91, 0x88, 0x2e, 0x1b, 0x87, 0x0e, 0xd1, - 0x4d, 0x9f, 0x40, 0x5d, 0x03, 0x94, 0xa7, 0xd6, 0xd1, 0x33, 0x4b, 0xd3, 0xcb, 0x56, 0xed, 0x58, - 0x0d, 0x2a, 0x63, 0xd5, 0x30, 0x6d, 0x45, 0x11, 0xb9, 0x07, 0x2b, 0x19, 0x54, 0x7b, 0xec, 0x06, - 0xe2, 0x57, 0xa7, 0x97, 0xad, 0xa5, 0x14, 0x8f, 0x46, 0x5b, 0x4a, 0x39, 0xe8, 0xb6, 0x4f, 0x61, - 0x4d, 0xf3, 0x8a, 0x9e, 0xbb, 0x89, 0x95, 0x5c, 0xc7, 0xc4, 0x51, 0x6e, 0xbc, 0xac, 0x5e, 0x6d, - 0xbf, 0x8d, 0x42, 0xbd, 0xdf, 0xa0, 0x07, 0xff, 0x0f, 0x9a, 0x63, 0xe7, 0x4e, 0xbc, 0x85, 0x20, - 0x5d, 0xdb, 0xf3, 0xcc, 0x8e, 0xdb, 0x69, 0xb5, 0x99, 0x29, 0x4d, 0xbd, 0x25, 0x38, 0xda, 0xd7, - 0xce, 0xdc, 0x4d, 0xd5, 0x72, 0x7f, 0x6e, 0xea, 0xcd, 0xcf, 0x50, 0xca, 0xa4, 0x3b, 0x05, 0x2d, - 0xed, 0xc5, 0xad, 0x19, 0x94, 0x76, 0xe3, 0x3e, 0x90, 0x0c, 0x95, 0xbb, 0xf6, 0x3f, 0x85, 0x85, - 0xf6, 0x73, 0xeb, 0x76, 0x61, 0x5d, 0x83, 0x67, 0x0d, 0x7c, 0x1b, 0xd1, 0xba, 0x5f, 0x4f, 0x8a, - 0x2e, 0xce, 0x9a, 0x58, 0x44, 0xff, 0xb7, 0xa0, 0xfd, 0x28, 0xc7, 0xbe, 0xaf, 0x8d, 0x2d, 0x6f, - 0x7e, 0x40, 0x1b, 0x9b, 0xfe, 0xae, 0x36, 0xa2, 
0x5b, 0xef, 0x69, 0x23, 0x76, 0x3f, 0xc5, 0x16, - 0xcd, 0xde, 0x4e, 0x7e, 0xf6, 0x54, 0xe2, 0xa4, 0xe0, 0xf8, 0x2f, 0xd3, 0xab, 0xe3, 0x0e, 0xfe, - 0xf6, 0xef, 0x7e, 0xec, 0x9e, 0xfd, 0x36, 0x90, 0xe2, 0x22, 0xbd, 0x3d, 0xee, 0x83, 0xa1, 0x5c, - 0x6e, 0x76, 0xe6, 0xe1, 0x22, 0x85, 0x7c, 0x95, 0x5d, 0x09, 0xdb, 0xf3, 0x90, 0xd3, 0x9b, 0x63, - 0x00, 0xa0, 0xbf, 0x6c, 0xe9, 0x86, 0xe6, 0xce, 0x1c, 0x12, 0xbd, 0xe5, 0xe9, 0x65, 0xab, 0xfe, - 0x3d, 0x92, 0x8f, 0x0f, 0xfb, 0x56, 0x5d, 0xeb, 0x1c, 0xbb, 0x61, 0x87, 0x42, 0xa3, 0x00, 0xcc, - 0xef, 0xdd, 0x52, 0xe1, 0xde, 0xcd, 0x5f, 0x04, 0x0b, 0x1f, 0x78, 0x11, 0x94, 0x3f, 0xf8, 0x22, - 0x30, 0x66, 0x5e, 0x04, 0x9d, 0x3f, 0x16, 0xa1, 0x9e, 0xbd, 0x3b, 0x88, 0x03, 0x5b, 0x8c, 0xdb, - 0x11, 0x15, 0x13, 0xe6, 0x52, 0x7b, 0x78, 0x21, 0x69, 0x64, 0x0b, 0xea, 0xc6, 0x22, 0x62, 0x13, - 0x9a, 0xbc, 0xd9, 0x76, 0x3e, 0xf2, 0x80, 0xd1, 0xbd, 0xb9, 0xc5, 0xf8, 0x40, 0xcb, 0xf4, 0x94, - 0x8a, 0x95, 0x8a, 0x90, 0x1f, 0xe1, 0x66, 0x3e, 0xc5, 0xa8, 0xa0, 0xbe, 0x30, 0x87, 0xfa, 0x7a, - 0xa6, 0x3e, 0xca, 0x95, 0x8f, 0x61, 0x9d, 0x71, 0xfb, 0x55, 0x4c, 0xe3, 0x19, 0xdd, 0xf2, 0x1c, - 0xba, 0x6b, 0x8c, 0x3f, 0x43, 0x7e, 0xae, 0x6a, 0xc3, 0x66, 0xa1, 0x25, 0xea, 0x2e, 0x2e, 0x68, - 0x1b, 0x73, 0x68, 0x6f, 0x64, 0x35, 0xab, 0xbb, 0x3b, 0x9f, 0xe0, 0x27, 0xd8, 0x60, 0xdc, 0x3e, - 0x77, 0x98, 0x7c, 0x57, 0x7d, 0x71, 0xbe, 0x8e, 0x3c, 0x77, 0x98, 0x9c, 0x95, 0xd6, 0x1d, 0x19, - 0x53, 0xe1, 0xcd, 0x74, 0xa4, 0x32, 0x5f, 0x47, 0x8e, 0x90, 0x9f, 0xab, 0xf6, 0x61, 0x8d, 0xf1, - 0x77, 0x6b, 0xad, 0xce, 0xa1, 0x79, 0x9d, 0xf1, 0xd9, 0x3a, 0x9f, 0xc1, 0x5a, 0x44, 0x5d, 0xc9, - 0x45, 0xd1, 0x6d, 0xb5, 0x39, 0x14, 0x57, 0x13, 0x7a, 0x26, 0xd9, 0x99, 0x00, 0xe4, 0x79, 0xb2, - 0x02, 0x0b, 0x3c, 0xc4, 0xa3, 0x53, 0xb7, 0x16, 0x78, 0xa8, 0xde, 0x80, 0x23, 0xf5, 0xb3, 0xa3, - 0x0f, 0x4e, 0xdd, 0x4a, 0x22, 0x75, 0x9e, 0xc6, 0xce, 0x4b, 0x9e, 0x3e, 0x02, 0x75, 0x80, 0xa3, - 0x2c, 0xe0, 0x22, 0x39, 0x3b, 0x3a, 0x50, 0xa3, 0x13, 0xc7, 0x8f, 0x69, 0xfa, 0xe6, 
0xc1, 0xa0, - 0x67, 0xbe, 0x79, 0xdb, 0xbc, 0xf6, 0xe7, 0xdb, 0xe6, 0xb5, 0x5f, 0xa6, 0xcd, 0xd2, 0x9b, 0x69, - 0xb3, 0xf4, 0xfb, 0xb4, 0x59, 0xfa, 0x7b, 0xda, 0x2c, 0x0d, 0x2b, 0xf8, 0x7f, 0xe8, 0xf3, 0x7f, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x21, 0x0b, 0xcd, 0x6e, 0x0d, 0x00, 0x00, + 0x16, 0x8d, 0x2c, 0xd9, 0xd2, 0x5c, 0xd9, 0x8e, 0x4d, 0x27, 0xce, 0xd8, 0x49, 0x2c, 0x47, 0xb6, + 0xdf, 0xf3, 0x7b, 0x06, 0x64, 0xbc, 0x3c, 0x20, 0x68, 0xd2, 0x04, 0x45, 0xe4, 0x24, 0x48, 0xd0, + 0xba, 0x51, 0x46, 0x36, 0xd2, 0xae, 0x06, 0xd4, 0x88, 0x19, 0xd1, 0x96, 0x86, 0x13, 0x0e, 0xc7, + 0x96, 0xbb, 0xea, 0xa2, 0x40, 0x57, 0xfd, 0x33, 0xfd, 0x15, 0x59, 0x76, 0x53, 0xa0, 0xdd, 0x18, + 0x8d, 0x7e, 0x49, 0x41, 0x72, 0x3e, 0xa8, 0x24, 0x8e, 0xab, 0xdd, 0x90, 0x3c, 0xe7, 0xdc, 0xcb, + 0x3b, 0x87, 0xc3, 0x3b, 0xb0, 0xe3, 0x53, 0xd1, 0x8b, 0x3b, 0x0d, 0x8f, 0x0d, 0x76, 0x3d, 0x16, + 0x08, 0x4c, 0x03, 0xc2, 0xbb, 0xbb, 0x9e, 0xcf, 0x59, 0x1c, 0x46, 0xbb, 0x03, 0x22, 0x38, 0xf5, + 0xa2, 0x46, 0xc8, 0x99, 0x60, 0xc8, 0xa6, 0xac, 0x91, 0x83, 0x1a, 0x09, 0xa8, 0x71, 0xf2, 0xbf, + 0xd5, 0x6b, 0x3e, 0xf3, 0x99, 0x02, 0xed, 0xca, 0x27, 0x8d, 0xaf, 0xff, 0x5a, 0x84, 0xf2, 0xbe, + 0x56, 0x40, 0x5f, 0x41, 0xb9, 0x17, 0xfb, 0x44, 0xf4, 0x3b, 0x76, 0x61, 0xbd, 0xb8, 0x5d, 0xbd, + 0xbb, 0xd5, 0xb8, 0x48, 0xad, 0xf1, 0x5c, 0x03, 0xdb, 0x02, 0x0b, 0x27, 0x65, 0xa1, 0x7b, 0x50, + 0x0a, 0x69, 0x37, 0xb2, 0xa7, 0xd6, 0x0b, 0xdb, 0xd5, 0xbb, 0xf5, 0x8b, 0xd9, 0x2d, 0xda, 0x8d, + 0x14, 0x55, 0xe1, 0xd1, 0x43, 0x28, 0x7a, 0x61, 0x6c, 0x17, 0x15, 0xed, 0xce, 0xc5, 0xb4, 0xbd, + 0xd6, 0xa1, 0x64, 0x35, 0xcb, 0xa3, 0xf3, 0x5a, 0x71, 0xaf, 0x75, 0xe8, 0x48, 0x1a, 0x7a, 0x08, + 0x33, 0x03, 0x32, 0x60, 0xfc, 0xcc, 0x2e, 0x29, 0x81, 0xcd, 0x8b, 0x05, 0xf6, 0x15, 0x4e, 0x45, + 0x4e, 0x38, 0xe8, 0x3e, 0x4c, 0x77, 0xfa, 0xc7, 0x94, 0xd9, 0xd3, 0x8a, 0xbc, 0x71, 0x31, 0xb9, + 0xd9, 0x3f, 0x7e, 0xf1, 0x52, 0x71, 0x35, 0x43, 0x6e, 0x97, 0x77, 0x07, 0xd8, 0x9e, 0xb9, 0x6c, + 0xbb, 0x4e, 0x77, 0x80, 0xf5, 0x76, 0x25, 
0x5e, 0xd6, 0x39, 0x20, 0xe2, 0x94, 0xf1, 0x63, 0xbb, + 0x7c, 0x59, 0x9d, 0xbf, 0xd5, 0x40, 0x5d, 0xe7, 0x84, 0x55, 0x3f, 0x86, 0xaa, 0x51, 0x7f, 0x74, + 0x0d, 0xa6, 0xe3, 0x08, 0xfb, 0xc4, 0x2e, 0xac, 0x17, 0xb6, 0x4b, 0x8e, 0x1e, 0xa0, 0x05, 0x28, + 0x0e, 0xf0, 0x50, 0xbd, 0x8b, 0x92, 0x23, 0x1f, 0x91, 0x0d, 0xe5, 0x37, 0x98, 0xf6, 0xbd, 0x40, + 0xa8, 0x52, 0x97, 0x9c, 0x74, 0x88, 0x56, 0xa1, 0x12, 0x62, 0x9f, 0x44, 0xf4, 0x07, 0xa2, 0x8a, + 0x68, 0x39, 0xd9, 0xb8, 0xfe, 0x00, 0x2a, 0xe9, 0xeb, 0x92, 0x0a, 0x5e, 0xcc, 0x39, 0x09, 0x44, + 0x12, 0x2b, 0x1d, 0xca, 0x1c, 0xfa, 0x74, 0x40, 0x45, 0x12, 0x4f, 0x0f, 0xea, 0x3f, 0x17, 0xa0, + 0x9c, 0xbc, 0x34, 0xf4, 0x85, 0x99, 0xe5, 0x67, 0xcb, 0xb5, 0xd7, 0x3a, 0x3c, 0x94, 0xc8, 0x74, + 0x27, 0x4d, 0x00, 0xd1, 0xe3, 0x4c, 0x88, 0x3e, 0x0d, 0xfc, 0xcb, 0xcd, 0x75, 0xa0, 0xb1, 0xc4, + 0x31, 0x58, 0xf5, 0xb7, 0x50, 0x49, 0x65, 0x65, 0xae, 0x82, 0x09, 0xdc, 0x4f, 0xeb, 0xa5, 0x06, + 0x68, 0x19, 0x66, 0x8e, 0x09, 0x0f, 0x48, 0x3f, 0xd9, 0x42, 0x32, 0x42, 0x08, 0x4a, 0x71, 0x44, + 0x78, 0x52, 0x32, 0xf5, 0x8c, 0x36, 0xa0, 0x1c, 0x12, 0xee, 0x4a, 0xd3, 0x96, 0xd6, 0x8b, 0xdb, + 0xa5, 0x26, 0x8c, 0xce, 0x6b, 0x33, 0x2d, 0xc2, 0xa5, 0x29, 0x67, 0x42, 0xc2, 0xf7, 0xc2, 0xb8, + 0x3e, 0x84, 0x4a, 0x9a, 0x8a, 0x2c, 0x5c, 0x48, 0x38, 0x65, 0xdd, 0x28, 0x2d, 0x5c, 0x32, 0x44, + 0x3b, 0xb0, 0x98, 0xa4, 0x49, 0xba, 0x6e, 0x8a, 0xd1, 0x19, 0x2c, 0x64, 0x0b, 0xad, 0x04, 0xbc, + 0x05, 0xf3, 0x39, 0x58, 0xd0, 0x01, 0x49, 0xb2, 0x9a, 0xcb, 0x66, 0x0f, 0xe8, 0x80, 0xd4, 0xff, + 0xac, 0x02, 0xe4, 0x56, 0x97, 0xfb, 0xf5, 0xb0, 0xd7, 0xcb, 0xfc, 0xa1, 0x06, 0x68, 0x05, 0x8a, + 0x3c, 0x4a, 0x42, 0xe9, 0x13, 0xe5, 0xb4, 0xdb, 0x8e, 0x9c, 0x43, 0xff, 0x82, 0x0a, 0x8f, 0x22, + 0x57, 0x1e, 0x6b, 0x1d, 0xa0, 0x59, 0x1d, 0x9d, 0xd7, 0xca, 0x4e, 0xbb, 0x2d, 0x6d, 0xe7, 0x94, + 0x79, 0x14, 0xc9, 0x07, 0x54, 0x83, 0xea, 0x00, 0x87, 0x21, 0xe9, 0xba, 0x6f, 0x68, 0x5f, 0x3b, + 0xa7, 0xe4, 0x80, 0x9e, 0x7a, 0x46, 0xfb, 0xaa, 0xd2, 0x5d, 0xca, 0xc5, 0x99, 
0x3a, 0x5c, 0x25, + 0x47, 0x0f, 0xd0, 0x2d, 0xb0, 0x4e, 0x39, 0x15, 0xa4, 0x83, 0xbd, 0x63, 0x75, 0x78, 0x4a, 0x4e, + 0x3e, 0x81, 0x6c, 0xa8, 0x84, 0xbe, 0x1b, 0xfa, 0x2e, 0x0d, 0xec, 0xb2, 0x7e, 0x13, 0xa1, 0xdf, + 0xf2, 0x5f, 0x04, 0x68, 0x15, 0x2c, 0xbd, 0xc2, 0x62, 0x61, 0x57, 0x92, 0x32, 0xfa, 0x2d, 0xff, + 0x65, 0x2c, 0xd0, 0x8a, 0x62, 0xbd, 0xc1, 0x71, 0x5f, 0xd8, 0x56, 0xba, 0xf4, 0x4c, 0x0e, 0xd1, + 0x3a, 0xcc, 0x86, 0xbe, 0x3b, 0xc0, 0x47, 0xc9, 0x32, 0xe8, 0x34, 0x43, 0x7f, 0x1f, 0x1f, 0x69, + 0xc4, 0x06, 0xcc, 0xd1, 0x00, 0x7b, 0x82, 0x9e, 0x10, 0x17, 0x07, 0x2c, 0xb0, 0xab, 0x0a, 0x32, + 0x9b, 0x4e, 0x3e, 0x0e, 0x58, 0x20, 0x37, 0x6b, 0x42, 0x66, 0xb5, 0x8a, 0x01, 0x30, 0x55, 0x54, + 0x3d, 0xe6, 0xc6, 0x55, 0x54, 0x45, 0x72, 0x15, 0x05, 0x99, 0x37, 0x55, 0x14, 0x60, 0x1d, 0xaa, + 0x71, 0x40, 0x4e, 0xa8, 0x27, 0x70, 0xa7, 0x4f, 0xec, 0xab, 0x0a, 0x60, 0x4e, 0xa1, 0x07, 0xb0, + 0xd2, 0xa3, 0x84, 0x63, 0xee, 0xf5, 0xa8, 0x87, 0xfb, 0xae, 0xfe, 0x90, 0xb9, 0xfa, 0xf8, 0x2d, + 0x28, 0xfc, 0x0d, 0x13, 0xa0, 0x9d, 0xf0, 0x8d, 0x5c, 0x46, 0xf7, 0x60, 0x6c, 0xc9, 0x8d, 0x4e, + 0x71, 0x98, 0x30, 0x17, 0x15, 0xf3, 0xba, 0xb9, 0xdc, 0x3e, 0xc5, 0xa1, 0xe6, 0xd5, 0xa0, 0xaa, + 0x4e, 0x89, 0xab, 0x8d, 0x84, 0x74, 0xda, 0x6a, 0x6a, 0x4f, 0xb9, 0xe9, 0x3f, 0x60, 0x69, 0x80, + 0xf4, 0xd4, 0x92, 0xf2, 0xcc, 0xec, 0xe8, 0xbc, 0x56, 0x39, 0x90, 0x93, 0xd2, 0x58, 0x15, 0xb5, + 0xec, 0x44, 0x11, 0xba, 0x07, 0xf3, 0x19, 0x54, 0x7b, 0xec, 0x9a, 0xc2, 0x2f, 0x8c, 0xce, 0x6b, + 0xb3, 0x29, 0x5e, 0x19, 0x6d, 0x36, 0xe5, 0x28, 0xb7, 0xfd, 0x17, 0x16, 0x35, 0xcf, 0xf4, 0xdc, + 0x75, 0x95, 0xc9, 0x55, 0xb5, 0xb0, 0x9f, 0x1b, 0x2f, 0xcb, 0x57, 0xdb, 0x6f, 0xd9, 0xc8, 0xf7, + 0x89, 0xf2, 0xe0, 0xbf, 0x41, 0x73, 0xdc, 0xdc, 0x89, 0x37, 0x14, 0x48, 0xe7, 0xf6, 0x3a, 0xb3, + 0xe3, 0x46, 0x9a, 0x6d, 0x66, 0x4a, 0x5b, 0xbf, 0x12, 0x35, 0xdb, 0xd2, 0xce, 0xdc, 0x4a, 0xd5, + 0x72, 0x7f, 0xae, 0xe8, 0x97, 0x9f, 0xa1, 0xa4, 0x49, 0x37, 0x0d, 0x2d, 0xed, 0xc5, 0xd5, 0x31, + 0x94, 0x76, 0xe3, 
0x0e, 0xa0, 0x0c, 0x95, 0xbb, 0xf6, 0xa6, 0xb1, 0xd1, 0x56, 0x6e, 0xdd, 0x06, + 0x2c, 0x69, 0xf0, 0xb8, 0x81, 0x6f, 0x29, 0xb4, 0xae, 0xd7, 0x0b, 0xd3, 0xc5, 0x59, 0x11, 0x4d, + 0xf4, 0x6d, 0x43, 0xfb, 0x71, 0x8e, 0xfd, 0x58, 0x5b, 0x95, 0x7c, 0xed, 0x13, 0xda, 0xaa, 0xe8, + 0x1f, 0x6a, 0x2b, 0x74, 0xed, 0x23, 0x6d, 0x85, 0xdd, 0x49, 0xb1, 0xa6, 0xd9, 0xd7, 0x93, 0xcf, + 0x9e, 0x5c, 0x38, 0x34, 0x1c, 0xff, 0x65, 0x7a, 0x75, 0xdc, 0x51, 0xdf, 0xfe, 0xad, 0xcb, 0x2e, + 0xf8, 0xa7, 0x81, 0xe0, 0x67, 0xe9, 0xed, 0x71, 0x1f, 0x4a, 0xd2, 0xe5, 0x76, 0x7d, 0x12, 0xae, + 0xa2, 0xa0, 0x47, 0xd9, 0x95, 0xb0, 0x31, 0x09, 0x39, 0xbd, 0x39, 0xda, 0x00, 0xfa, 0xc9, 0x15, + 0x5e, 0x68, 0x6f, 0x4e, 0x20, 0xd1, 0x9c, 0x1b, 0x9d, 0xd7, 0xac, 0xaf, 0x15, 0xf9, 0x60, 0xaf, + 0xe5, 0x58, 0x5a, 0xe7, 0xc0, 0x0b, 0xeb, 0x04, 0xaa, 0x06, 0x30, 0xbf, 0x77, 0x0b, 0xc6, 0xbd, + 0x9b, 0x77, 0x04, 0x53, 0x9f, 0xe8, 0x08, 0x8a, 0x9f, 0xec, 0x08, 0x4a, 0x63, 0x1d, 0x41, 0xfd, + 0xf7, 0x69, 0xb0, 0xb2, 0x86, 0x07, 0x61, 0x58, 0xa5, 0xcc, 0x8d, 0x08, 0x3f, 0xa1, 0x1e, 0x71, + 0x3b, 0x67, 0x82, 0x44, 0x2e, 0x27, 0x5e, 0xcc, 0x23, 0x7a, 0x42, 0x92, 0x66, 0x71, 0xf3, 0x92, + 0xce, 0x49, 0xd7, 0xe6, 0x06, 0x65, 0x6d, 0x2d, 0xd3, 0x94, 0x2a, 0x4e, 0x2a, 0x82, 0xbe, 0x83, + 0xeb, 0x79, 0x88, 0xae, 0xa1, 0x3e, 0x35, 0x81, 0xfa, 0x52, 0xa6, 0xde, 0xcd, 0x95, 0x0f, 0x60, + 0x89, 0x32, 0xf7, 0x6d, 0x4c, 0xe2, 0x31, 0xdd, 0xe2, 0x04, 0xba, 0x8b, 0x94, 0xbd, 0x52, 0xfc, + 0x5c, 0xd5, 0x85, 0x15, 0xa3, 0x24, 0xf2, 0x2e, 0x36, 0xb4, 0x4b, 0x13, 0x68, 0x2f, 0x67, 0x39, + 0xcb, 0xbb, 0x3b, 0x0f, 0xf0, 0x3d, 0x2c, 0x53, 0xe6, 0x9e, 0x62, 0x2a, 0x3e, 0x54, 0x9f, 0x9e, + 0xac, 0x22, 0xaf, 0x31, 0x15, 0xe3, 0xd2, 0xba, 0x22, 0x03, 0xc2, 0xfd, 0xb1, 0x8a, 0xcc, 0x4c, + 0x56, 0x91, 0x7d, 0xc5, 0xcf, 0x55, 0x5b, 0xb0, 0x48, 0xd9, 0x87, 0xb9, 0x96, 0x27, 0xd0, 0xbc, + 0x4a, 0xd9, 0x78, 0x9e, 0xaf, 0x60, 0x31, 0x22, 0x9e, 0x60, 0xdc, 0x74, 0x5b, 0x65, 0x02, 0xc5, + 0x85, 0x84, 0x9e, 0x49, 0xd6, 0x4f, 0x00, 0xf2, 0x75, 
0x34, 0x0f, 0x53, 0x2c, 0x54, 0x47, 0xc7, + 0x72, 0xa6, 0x58, 0x28, 0x7b, 0xc0, 0xae, 0xfc, 0xec, 0xe8, 0x83, 0x63, 0x39, 0xc9, 0x48, 0x9e, + 0xa7, 0x01, 0x3e, 0x62, 0x69, 0x13, 0xa8, 0x07, 0x6a, 0x96, 0x06, 0x8c, 0x27, 0x67, 0x47, 0x0f, + 0xe4, 0xec, 0x09, 0xee, 0xc7, 0x24, 0xed, 0x79, 0xd4, 0xa0, 0xfe, 0x53, 0x01, 0x2a, 0xe9, 0x6f, + 0x00, 0x7a, 0x64, 0xb6, 0xd1, 0xc5, 0xcf, 0xff, 0x75, 0x48, 0x92, 0xde, 0x4c, 0xd6, 0x6b, 0xdf, + 0xcf, 0x7b, 0xed, 0x7f, 0x4c, 0x4e, 0x1a, 0x72, 0x02, 0x56, 0x36, 0x67, 0xec, 0xb6, 0x30, 0xb6, + 0xdb, 0x1a, 0x54, 0x7b, 0x1e, 0x76, 0x7b, 0x38, 0xe8, 0xf6, 0x89, 0xee, 0x10, 0xe7, 0x1c, 0xe8, + 0x79, 0xf8, 0xb9, 0x9e, 0x49, 0x01, 0xac, 0x73, 0x44, 0x3c, 0x11, 0xa9, 0xa2, 0x68, 0xc0, 0x4b, + 0x3d, 0x53, 0xff, 0x65, 0x0a, 0xaa, 0xc6, 0x9f, 0x8b, 0xec, 0xa1, 0x03, 0x3c, 0x48, 0xe3, 0xa8, + 0x67, 0xd9, 0xb1, 0xf1, 0xa1, 0xfe, 0x96, 0x24, 0x9f, 0xa9, 0x32, 0x1f, 0xaa, 0x8f, 0x02, 0xba, + 0x0d, 0xc0, 0x87, 0x6e, 0x88, 0xbd, 0x63, 0x92, 0xc8, 0x97, 0x1c, 0x8b, 0x0f, 0x5b, 0x7a, 0x02, + 0xdd, 0x04, 0x8b, 0x0f, 0x5d, 0xc2, 0x39, 0xe3, 0x51, 0x52, 0xfb, 0x0a, 0x1f, 0x3e, 0x55, 0xe3, + 0x84, 0xdb, 0xe5, 0x4c, 0xf6, 0x02, 0xc9, 0x3b, 0xb0, 0xf8, 0xf0, 0x89, 0x9e, 0x90, 0x51, 0x45, + 0x1a, 0x55, 0xb7, 0x9e, 0x65, 0x91, 0x47, 0x15, 0x79, 0x54, 0xdd, 0x7a, 0x5a, 0xc2, 0x8c, 0x2a, + 0xb2, 0xa8, 0xba, 0xfb, 0xac, 0x08, 0x23, 0xaa, 0xc8, 0xa3, 0x5a, 0x29, 0x37, 0x89, 0xda, 0xb4, + 0xdf, 0xbd, 0x5f, 0xbb, 0xf2, 0xc7, 0xfb, 0xb5, 0x2b, 0x3f, 0x8e, 0xd6, 0x0a, 0xef, 0x46, 0x6b, + 0x85, 0xdf, 0x46, 0x6b, 0x85, 0xbf, 0x46, 0x6b, 0x85, 0xce, 0x8c, 0xfa, 0x0d, 0xff, 0xff, 0xdf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x19, 0x9d, 0xe2, 0xd3, 0xe5, 0x0f, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/cgroups/metrics.proto b/vendor/github.com/containerd/cgroups/metrics.proto index 642623fcee45d..62b519806c390 100644 --- a/vendor/github.com/containerd/cgroups/metrics.proto +++ b/vendor/github.com/containerd/cgroups/metrics.proto @@ -11,6 +11,7 @@ message Metrics 
{ MemoryStat memory = 4; BlkIOStat blkio = 5; RdmaStat rdma = 6; + repeated NetworkStat network = 7; } message HugetlbStat { @@ -121,3 +122,15 @@ message RdmaEntry { uint32 hca_handles = 2; uint32 hca_objects = 3; } + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go index f3129b1a3af4d..8a97d04ddfe5d 100644 --- a/vendor/github.com/containerd/cgroups/utils.go +++ b/vendor/github.com/containerd/cgroups/utils.go @@ -168,7 +168,7 @@ func readTasksPids(path string, subsystem Name) ([]Task, error) { func hugePageSizes() ([]string, error) { var ( pageSizes []string - sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"} + sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"} ) files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") if err != nil { diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index 9b2ba3def8c98..2323f26f623d0 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -218,7 +218,7 @@ This will be the best place to discuss design and implementation. For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development. **Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com. 
-[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ) +[Click here for an invite to docker community slack.](https://dockr.ly/slack) ### Security audit diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index 9ada87346d88d..6c7920004fc64 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -9,6 +9,7 @@ import ( types "github.com/containerd/containerd/api/types" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type ApplyRequest struct { // Diff is the descriptor of the diff to be extracted - Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` - Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` + Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } @@ -205,6 +207,7 @@ var 
xxx_messageInfo_DiffResponse proto.InternalMessageInfo func init() { proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") + proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry") proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") @@ -216,36 +219,40 @@ func init() { } var fileDescriptor_3b36a99e6faaa935 = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, - 0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a, - 0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47, - 0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef, - 0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea, - 0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1, - 0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63, - 0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35, - 0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa, - 0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab, - 0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6, - 0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f, - 0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 
0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb, - 0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b, - 0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d, - 0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2, - 0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb, - 0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd, - 0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b, - 0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f, - 0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb, - 0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77, - 0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b, - 0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac, - 0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f, - 0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1, - 0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00, + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29, + 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20, + 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e, + 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 
0x05, 0xc3, 0xc9, + 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67, + 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d, + 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae, + 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca, + 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a, + 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc, + 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2, + 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63, + 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0, + 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf, + 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35, + 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73, + 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e, + 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47, + 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a, + 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13, + 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29, + 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41, + 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95, + 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec, + 0x9e, 0x7a, 0xe0, 
0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce, + 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54, + 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f, + 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef, + 0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7, + 0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7, + 0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7, + 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Payloads) > 0 { + for k, _ := range m.Payloads { + dAtA[i] = 0x1a + i++ + v := m.Payloads[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovDiff(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize + i = encodeVarintDiff(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintDiff(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDiff(dAtA, i, uint64(v.Size())) + n2, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size())) - n2, err := m.Applied.MarshalTo(dAtA[i:]) + n3, err := m.Applied.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -530,11 +565,11 @@ func (m *DiffResponse) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size())) - n3, err := m.Diff.MarshalTo(dAtA[i:]) + n4, err := m.Diff.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) { n += 1 + l + sovDiff(uint64(l)) } } + if len(m.Payloads) > 0 { + for k, v := range m.Payloads { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDiff(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string { if this == nil { return "nil" } + keysForPayloads := make([]string, 0, len(this.Payloads)) + for k, _ := range this.Payloads { + keysForPayloads = append(keysForPayloads, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads) + mapStringForPayloads := "map[string]*types1.Any{" + for _, k := range keysForPayloads { + mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k]) + } + mapStringForPayloads += "}" s := strings.Join([]string{`&ApplyRequest{`, `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`, + `Payloads:` + mapStringForPayloads + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDiff + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDiff + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payloads == nil { + m.Payloads = make(map[string]*types1.Any) + } + var mapkey string + var mapvalue *types1.Any + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDiff + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDiff + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDiff + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDiff + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &types1.Any{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil 
{ + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDiff(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDiff + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Payloads[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDiff(dAtA[iNdEx:]) diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto index 66d7ecb19f6b3..ae2707a258cc3 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package containerd.services.diff.v1; import weak "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -25,6 +26,8 @@ message ApplyRequest { containerd.types.Descriptor diff = 1; repeated containerd.types.Mount mounts = 2; + + map payloads = 3; } message ApplyResponse { diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index 016ced4b78eb9..a4e238685efb4 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -10,6 +10,7 @@ import ( rpc "github.com/gogo/googleapis/google/rpc" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() { var 
xxx_messageInfo_PluginsResponse proto.InternalMessageInfo +type ServerResponse struct { + UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerResponse) Reset() { *m = ServerResponse{} } +func (*ServerResponse) ProtoMessage() {} +func (*ServerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1a14fda866f10715, []int{3} +} +func (m *ServerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerResponse.Merge(m, src) +} +func (m *ServerResponse) XXX_Size() int { + return m.Size() +} +func (m *ServerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") + proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse") } func init() { @@ -203,38 +244,42 @@ func init() { } var fileDescriptor_1a14fda866f10715 = []byte{ - // 487 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xcd, 0x3a, 0x69, 0xdc, 
0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21, - 0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10, - 0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb, - 0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b, - 0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda, - 0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22, - 0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08, - 0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5, - 0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c, - 0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e, - 0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a, - 0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89, - 0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d, - 0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49, - 0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93, - 0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67, - 0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32, - 0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b, - 0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea, - 0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5, - 0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 
0x3a, 0xbe, 0xb2, 0xc4, 0x87, - 0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e, - 0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e, - 0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c, - 0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82, - 0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe, - 0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78, - 0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc, - 0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6, - 0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00, + // 549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10, + 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08, + 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47, + 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79, + 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e, + 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62, + 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71, + 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b, + 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34, + 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3, + 0x74, 0xea, 
0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d, + 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4, + 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65, + 0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d, + 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad, + 0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5, + 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1, + 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07, + 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe, + 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d, + 0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6, + 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88, + 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c, + 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a, + 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a, + 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c, + 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd, + 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac, + 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0, + 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3, + 0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 
0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4, + 0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d, + 0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50, + 0xdc, 0x89, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -254,6 +299,8 @@ type IntrospectionClient interface { // Clients can use this to detect features and capabilities when using // containerd. Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) } type introspectionClient struct { @@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o return out, nil } +func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { + out := new(ServerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // IntrospectionServer is the server API for Introspection service. type IntrospectionServer interface { // Plugins returns a list of plugins in containerd. @@ -280,6 +336,8 @@ type IntrospectionServer interface { // Clients can use this to detect features and capabilities when using // containerd. 
Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(context.Context, *types1.Empty) (*ServerResponse, error) } func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { @@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(types1.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Server(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Server", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty)) + } + return interceptor(ctx, in, info, handler) +} + var _Introspection_serviceDesc = grpc.ServiceDesc{ ServiceName: "containerd.services.introspection.v1.Introspection", HandlerType: (*IntrospectionServer)(nil), @@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{ MethodName: "Plugins", Handler: _Introspection_Plugins_Handler, }, + { + MethodName: "Server", + Handler: _Introspection_Server_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", @@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ServerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.UUID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID))) + i += copy(dAtA[i:], m.UUID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) { return n } +func (m *ServerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UUID) + if l > 0 { + n += 1 + l + sovIntrospection(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovIntrospection(x uint64) (n int) { for { n++ @@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string { }, "") return s } +func (this *ServerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerResponse{`, + `UUID:` + fmt.Sprintf("%v", this.UUID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func valueToStringIntrospection(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *ServerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIntrospection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIntrospection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIntrospection(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipIntrospection(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto index 95e804b9b7e41..79cee9a5721aa 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto @@ -4,6 +4,7 @@ package containerd.services.introspection.v1; import "github.com/containerd/containerd/api/types/platform.proto"; import "google/rpc/status.proto"; +import "google/protobuf/empty.proto"; import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; @@ -14,6 +15,8 @@ service Introspection { // Clients can use this to detect features and capabilities when using // containerd. 
rpc Plugins(PluginsRequest) returns (PluginsResponse); + // Server returns information about the containerd server + rpc Server(google.protobuf.Empty) returns (ServerResponse); } message Plugin { @@ -79,3 +82,7 @@ message PluginsRequest { message PluginsResponse { repeated Plugin plugins = 1 [(gogoproto.nullable) = false]; } + +message ServerResponse { + string uuid = 1 [(gogoproto.customname) = "UUID"]; +} diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go index 3cf21383ec81c..9a3313ef5b3be 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go @@ -275,6 +275,207 @@ func (m *ListResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ListResponse proto.InternalMessageInfo +type Resource struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // For snapshotter resource, there are many snapshotter types here, like + // overlayfs, devmapper etc. The type will be formatted with type, + // like "snapshotter/overlayfs". 
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{6} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return m.Size() +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +type AddResourceRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddResourceRequest) Reset() { *m = AddResourceRequest{} } +func (*AddResourceRequest) ProtoMessage() {} +func (*AddResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{7} +} +func (m *AddResourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddResourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*AddResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddResourceRequest.Merge(m, src) +} +func (m *AddResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *AddResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddResourceRequest proto.InternalMessageInfo + +type DeleteResourceRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResourceRequest) Reset() { *m = DeleteResourceRequest{} } +func (*DeleteResourceRequest) ProtoMessage() {} +func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{8} +} +func (m *DeleteResourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteResourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResourceRequest.Merge(m, src) +} +func (m *DeleteResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResourceRequest proto.InternalMessageInfo + +type ListResourcesRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcesRequest) Reset() { *m = ListResourcesRequest{} } +func 
(*ListResourcesRequest) ProtoMessage() {} +func (*ListResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{9} +} +func (m *ListResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcesRequest.Merge(m, src) +} +func (m *ListResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *ListResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcesRequest proto.InternalMessageInfo + +type ListResourcesResponse struct { + Resources []Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResourcesResponse) Reset() { *m = ListResourcesResponse{} } +func (*ListResourcesResponse) ProtoMessage() {} +func (*ListResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fefd70dfe8d93cbf, []int{10} +} +func (m *ListResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResourcesResponse.Merge(m, src) +} +func (m *ListResourcesResponse) XXX_Size() int { + return 
m.Size() +} +func (m *ListResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResourcesResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*Lease)(nil), "containerd.services.leases.v1.Lease") proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.Lease.LabelsEntry") @@ -284,6 +485,11 @@ func init() { proto.RegisterType((*DeleteRequest)(nil), "containerd.services.leases.v1.DeleteRequest") proto.RegisterType((*ListRequest)(nil), "containerd.services.leases.v1.ListRequest") proto.RegisterType((*ListResponse)(nil), "containerd.services.leases.v1.ListResponse") + proto.RegisterType((*Resource)(nil), "containerd.services.leases.v1.Resource") + proto.RegisterType((*AddResourceRequest)(nil), "containerd.services.leases.v1.AddResourceRequest") + proto.RegisterType((*DeleteResourceRequest)(nil), "containerd.services.leases.v1.DeleteResourceRequest") + proto.RegisterType((*ListResourcesRequest)(nil), "containerd.services.leases.v1.ListResourcesRequest") + proto.RegisterType((*ListResourcesResponse)(nil), "containerd.services.leases.v1.ListResourcesResponse") } func init() { @@ -291,40 +497,48 @@ func init() { } var fileDescriptor_fefd70dfe8d93cbf = []byte{ - // 515 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40, - 0x14, 0xc6, 0x3b, 0xe9, 0x36, 0x6e, 0x4f, 0x5d, 0x91, 0x61, 0x59, 0x4a, 0xc4, 0xb4, 0x04, 0xc1, - 0xe2, 0x9f, 0x89, 0x5b, 0x6f, 0xd6, 0x5d, 0x11, 0xec, 0x76, 0x41, 0x21, 0x88, 0x04, 0x2f, 0x16, - 0x6f, 0x96, 0x34, 0x3d, 0x1b, 0x83, 0x69, 0x12, 0x33, 0xd3, 0x42, 0xef, 0x7c, 0x04, 0x1f, 0xc1, - 0x87, 0xf0, 0x21, 0x7a, 0xe9, 0xa5, 0x57, 0xab, 0x9b, 0x3b, 0xdf, 0x42, 0x32, 0x93, 0xb0, 0x7f, - 0x44, 0x5b, 0x65, 0xef, 0xce, 0xcc, 0x7c, 0xdf, 0x99, 0xdf, 0xf9, 0xc2, 0x04, 0x86, 0x41, 0x28, - 0xde, 0x4d, 0x47, 0xcc, 0x4f, 0x26, 0xb6, 0x9f, 0xc4, 0xc2, 0x0b, 0x63, 
0xcc, 0xc6, 0xe7, 0x4b, - 0x2f, 0x0d, 0x6d, 0x8e, 0xd9, 0x2c, 0xf4, 0x91, 0xdb, 0x11, 0x7a, 0x1c, 0xb9, 0x3d, 0xdb, 0x2e, - 0x2b, 0x96, 0x66, 0x89, 0x48, 0xe8, 0xed, 0x33, 0x3d, 0xab, 0xb4, 0xac, 0x54, 0xcc, 0xb6, 0x8d, - 0xcd, 0x20, 0x09, 0x12, 0xa9, 0xb4, 0x8b, 0x4a, 0x99, 0x8c, 0x5b, 0x41, 0x92, 0x04, 0x11, 0xda, - 0x72, 0x35, 0x9a, 0x1e, 0xdb, 0x38, 0x49, 0xc5, 0xbc, 0x3c, 0xec, 0x5c, 0x3e, 0x14, 0xe1, 0x04, - 0xb9, 0xf0, 0x26, 0xa9, 0x12, 0x58, 0x3f, 0x09, 0x34, 0x9c, 0xe2, 0x06, 0xba, 0x05, 0x5a, 0x38, - 0x6e, 0x93, 0x2e, 0xe9, 0x35, 0x07, 0x7a, 0x7e, 0xd2, 0xd1, 0x5e, 0x0e, 0x5d, 0x2d, 0x1c, 0xd3, - 0x7d, 0x00, 0x3f, 0x43, 0x4f, 0xe0, 0xf8, 0xc8, 0x13, 0x6d, 0xad, 0x4b, 0x7a, 0xad, 0xbe, 0xc1, - 0x54, 0x5f, 0x56, 0xf5, 0x65, 0x6f, 0xaa, 0xbe, 0x83, 0xf5, 0xc5, 0x49, 0xa7, 0xf6, 0xe9, 0x7b, - 0x87, 0xb8, 0xcd, 0xd2, 0xf7, 0x5c, 0xd0, 0x17, 0xa0, 0x47, 0xde, 0x08, 0x23, 0xde, 0xae, 0x77, - 0xeb, 0xbd, 0x56, 0xff, 0x11, 0xfb, 0xeb, 0xa8, 0x4c, 0x22, 0x31, 0x47, 0x5a, 0x0e, 0x62, 0x91, - 0xcd, 0xdd, 0xd2, 0x6f, 0x3c, 0x81, 0xd6, 0xb9, 0x6d, 0x7a, 0x13, 0xea, 0xef, 0x71, 0xae, 0xb0, - 0xdd, 0xa2, 0xa4, 0x9b, 0xd0, 0x98, 0x79, 0xd1, 0x14, 0x25, 0x6a, 0xd3, 0x55, 0x8b, 0x5d, 0x6d, - 0x87, 0x58, 0x5f, 0x08, 0x6c, 0xec, 0x4b, 0x24, 0x17, 0x3f, 0x4c, 0x91, 0x8b, 0x3f, 0xce, 0xfc, - 0xfa, 0x12, 0xee, 0xce, 0x12, 0xdc, 0x0b, 0x5d, 0xaf, 0x1a, 0xdb, 0x81, 0x1b, 0x55, 0x7f, 0x9e, - 0x26, 0x31, 0x47, 0xba, 0x0b, 0x0d, 0x79, 0xb7, 0xf4, 0xb7, 0xfa, 0x77, 0x56, 0x09, 0xd3, 0x55, - 0x16, 0x6b, 0x0f, 0x36, 0x86, 0x18, 0xe1, 0xf2, 0x0c, 0x28, 0xac, 0xf1, 0x79, 0xec, 0x4b, 0x9e, - 0x75, 0x57, 0xd6, 0xd6, 0x5d, 0x68, 0x39, 0x21, 0x17, 0x95, 0xb5, 0x0d, 0xd7, 0x8e, 0xc3, 0x48, - 0x60, 0xc6, 0xdb, 0xa4, 0x5b, 0xef, 0x35, 0xdd, 0x6a, 0x69, 0x39, 0x70, 0x5d, 0x09, 0x4b, 0xe2, - 0xa7, 0xa0, 0x2b, 0x1e, 0x29, 0x5c, 0x15, 0xb9, 0xf4, 0xf4, 0x3f, 0x6b, 0xa0, 0xcb, 0x1d, 0x4e, - 0x11, 0x74, 0x15, 0x06, 0x7d, 0xf0, 0x2f, 0xdf, 0xc4, 0x78, 0xb8, 0xa2, 0xba, 0xe4, 0x7d, 0x05, - 0xba, 0x4a, 
0x69, 0xe9, 0x35, 0x17, 0xc2, 0x34, 0xb6, 0x7e, 0x7b, 0x18, 0x07, 0xc5, 0x6b, 0xa4, - 0x47, 0xb0, 0x56, 0xe4, 0x41, 0xef, 0x2d, 0x9b, 0xfb, 0x2c, 0x5d, 0xe3, 0xfe, 0x4a, 0x5a, 0x05, - 0x3c, 0x38, 0x5c, 0x9c, 0x9a, 0xb5, 0x6f, 0xa7, 0x66, 0xed, 0x63, 0x6e, 0x92, 0x45, 0x6e, 0x92, - 0xaf, 0xb9, 0x49, 0x7e, 0xe4, 0x26, 0x79, 0xfb, 0xec, 0x3f, 0x7f, 0x4d, 0x7b, 0xaa, 0x3a, 0xac, - 0x8d, 0x74, 0x39, 0xcc, 0xe3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x74, 0xdd, 0x12, 0xe5, - 0x04, 0x00, 0x00, + // 644 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xce, 0x26, 0xa9, 0x49, 0x26, 0xb4, 0x42, 0xab, 0xb6, 0x8a, 0x8c, 0x48, 0x22, 0x0b, 0xa9, + 0x11, 0x3f, 0x36, 0x4d, 0x2b, 0x54, 0x5a, 0x84, 0xd4, 0xb4, 0x95, 0xa8, 0x88, 0x10, 0xb2, 0x38, + 0x54, 0x1c, 0xa8, 0x1c, 0x7b, 0x1b, 0x2c, 0x9c, 0xd8, 0x78, 0x37, 0x41, 0xe9, 0x89, 0x47, 0xe0, + 0x61, 0x78, 0x88, 0x1e, 0x39, 0x21, 0x4e, 0x85, 0xe6, 0xc6, 0x5b, 0x20, 0xef, 0x0f, 0x6d, 0x5a, + 0xb5, 0x76, 0x11, 0xe2, 0x36, 0x1b, 0x7f, 0xdf, 0xcc, 0x37, 0x33, 0xdf, 0x6e, 0x60, 0xbb, 0xe7, + 0xb3, 0x77, 0xc3, 0xae, 0xe9, 0x86, 0x7d, 0xcb, 0x0d, 0x07, 0xcc, 0xf1, 0x07, 0x24, 0xf6, 0xce, + 0x86, 0x4e, 0xe4, 0x5b, 0x94, 0xc4, 0x23, 0xdf, 0x25, 0xd4, 0x0a, 0x88, 0x43, 0x09, 0xb5, 0x46, + 0xcb, 0x32, 0x32, 0xa3, 0x38, 0x64, 0x21, 0xbe, 0x73, 0x8a, 0x37, 0x15, 0xd6, 0x94, 0x88, 0xd1, + 0xb2, 0x3e, 0xdf, 0x0b, 0x7b, 0x21, 0x47, 0x5a, 0x49, 0x24, 0x48, 0xfa, 0xed, 0x5e, 0x18, 0xf6, + 0x02, 0x62, 0xf1, 0x53, 0x77, 0x78, 0x60, 0x91, 0x7e, 0xc4, 0xc6, 0xf2, 0x63, 0xfd, 0xfc, 0x47, + 0xe6, 0xf7, 0x09, 0x65, 0x4e, 0x3f, 0x12, 0x00, 0xe3, 0x17, 0x82, 0x99, 0x4e, 0x52, 0x01, 0x2f, + 0x42, 0xde, 0xf7, 0xaa, 0xa8, 0x81, 0x9a, 0xe5, 0xb6, 0x36, 0x39, 0xae, 0xe7, 0x77, 0xb7, 0xed, + 0xbc, 0xef, 0xe1, 0x2d, 0x00, 0x37, 0x26, 0x0e, 0x23, 0xde, 0xbe, 0xc3, 0xaa, 0xf9, 0x06, 0x6a, + 0x56, 0x5a, 0xba, 0x29, 0xf2, 0x9a, 0x2a, 0xaf, 0xf9, 0x5a, 0xe5, 0x6d, 0x97, 
0x8e, 0x8e, 0xeb, + 0xb9, 0xcf, 0x3f, 0xea, 0xc8, 0x2e, 0x4b, 0xde, 0x26, 0xc3, 0xcf, 0x41, 0x0b, 0x9c, 0x2e, 0x09, + 0x68, 0xb5, 0xd0, 0x28, 0x34, 0x2b, 0xad, 0x47, 0xe6, 0x95, 0xad, 0x9a, 0x5c, 0x92, 0xd9, 0xe1, + 0x94, 0x9d, 0x01, 0x8b, 0xc7, 0xb6, 0xe4, 0xeb, 0x4f, 0xa0, 0x72, 0xe6, 0x67, 0x7c, 0x0b, 0x0a, + 0xef, 0xc9, 0x58, 0xc8, 0xb6, 0x93, 0x10, 0xcf, 0xc3, 0xcc, 0xc8, 0x09, 0x86, 0x84, 0x4b, 0x2d, + 0xdb, 0xe2, 0xb0, 0x9e, 0x5f, 0x43, 0xc6, 0x17, 0x04, 0xb3, 0x5b, 0x5c, 0x92, 0x4d, 0x3e, 0x0c, + 0x09, 0x65, 0x97, 0xf6, 0xfc, 0xea, 0x9c, 0xdc, 0xb5, 0x14, 0xb9, 0x53, 0x59, 0xff, 0xb5, 0xec, + 0x0e, 0xcc, 0xa9, 0xfc, 0x34, 0x0a, 0x07, 0x94, 0xe0, 0x75, 0x98, 0xe1, 0xb5, 0x39, 0xbf, 0xd2, + 0xba, 0x9b, 0x65, 0x98, 0xb6, 0xa0, 0x18, 0x1b, 0x30, 0xbb, 0x4d, 0x02, 0x92, 0x3e, 0x03, 0x0c, + 0x45, 0x3a, 0x1e, 0xb8, 0x5c, 0x4f, 0xc9, 0xe6, 0xb1, 0xb1, 0x04, 0x95, 0x8e, 0x4f, 0x99, 0xa2, + 0x56, 0xe1, 0xc6, 0x81, 0x1f, 0x30, 0x12, 0xd3, 0x2a, 0x6a, 0x14, 0x9a, 0x65, 0x5b, 0x1d, 0x8d, + 0x0e, 0xdc, 0x14, 0x40, 0xa9, 0xf8, 0x29, 0x68, 0x42, 0x0f, 0x07, 0x66, 0x95, 0x2c, 0x39, 0xc6, + 0x63, 0x28, 0xd9, 0x84, 0x86, 0xc3, 0xd8, 0x25, 0x57, 0xc9, 0x65, 0xe3, 0x48, 0x8d, 0x8f, 0xc7, + 0xc6, 0x47, 0xc0, 0x9b, 0x9e, 0xa7, 0xa8, 0x69, 0x0d, 0xef, 0x42, 0x29, 0x96, 0x50, 0x69, 0xf3, + 0xa5, 0x14, 0x95, 0x2a, 0x73, 0xbb, 0x98, 0x78, 0xde, 0xfe, 0x43, 0x37, 0x0e, 0x61, 0x41, 0x0d, + 0xf9, 0xbf, 0xd7, 0x36, 0x61, 0x5e, 0x8e, 0x9e, 0x9f, 0x69, 0x4a, 0x69, 0xc3, 0x83, 0x85, 0x73, + 0x78, 0xb9, 0xb3, 0x17, 0x50, 0x56, 0x49, 0xd5, 0xda, 0xae, 0x29, 0xea, 0x94, 0xdf, 0xfa, 0x56, + 0x04, 0x8d, 0x2f, 0x95, 0x62, 0x02, 0x9a, 0xf0, 0x33, 0x7e, 0x70, 0x9d, 0x6b, 0xa5, 0x3f, 0xcc, + 0x88, 0x96, 0xf2, 0x5f, 0x82, 0x26, 0x76, 0x90, 0x5a, 0x66, 0xea, 0x3e, 0xe8, 0x8b, 0x17, 0xde, + 0xb6, 0x9d, 0xe4, 0x41, 0xc5, 0xfb, 0x50, 0x4c, 0xe6, 0x84, 0xef, 0xa5, 0x59, 0xf7, 0xf4, 0x82, + 0xe8, 0xf7, 0x33, 0x61, 0xa5, 0xe0, 0x3d, 0xa8, 0x9c, 0x71, 0x2b, 0x5e, 0x4e, 0xe1, 0x5e, 0x74, + 0xf6, 0xa5, 0xd2, 
0xdf, 0xc2, 0xdc, 0xb4, 0x1d, 0xf1, 0x6a, 0xc6, 0x91, 0x64, 0xcb, 0x7f, 0x08, + 0xb3, 0x53, 0x16, 0xc2, 0x2b, 0xd9, 0xfa, 0x9e, 0x32, 0xa8, 0xbe, 0x7a, 0x3d, 0x92, 0x98, 0x5a, + 0x7b, 0xef, 0xe8, 0xa4, 0x96, 0xfb, 0x7e, 0x52, 0xcb, 0x7d, 0x9a, 0xd4, 0xd0, 0xd1, 0xa4, 0x86, + 0xbe, 0x4e, 0x6a, 0xe8, 0xe7, 0xa4, 0x86, 0xde, 0x3c, 0xfb, 0xcb, 0xff, 0xe4, 0x0d, 0x11, 0xed, + 0xe5, 0xba, 0x1a, 0xef, 0x73, 0xe5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xfe, 0x39, 0x67, + 0xde, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -349,6 +563,12 @@ type LeasesClient interface { // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) } type leasesClient struct { @@ -386,6 +606,33 @@ func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.C return out, nil } +func (c *leasesClient) AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/AddResource", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/DeleteResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { + out := new(ListResourcesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/ListResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // LeasesServer is the server API for Leases service. type LeasesServer interface { // Create creates a new lease for managing changes to metadata. A lease @@ -398,6 +645,12 @@ type LeasesServer interface { // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. List(context.Context, *ListRequest) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(context.Context, *AddResourceRequest) (*types.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(context.Context, *DeleteResourceRequest) (*types.Empty, error) + // ListResources lists all the resources referenced by the lease. 
+ ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) } func RegisterLeasesServer(s *grpc.Server, srv LeasesServer) { @@ -458,6 +711,60 @@ func _Leases_List_Handler(srv interface{}, ctx context.Context, dec func(interfa return interceptor(ctx, in, info, handler) } +func _Leases_AddResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).AddResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/AddResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).AddResource(ctx, req.(*AddResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).DeleteResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/DeleteResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).ListResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ 
+ Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/ListResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).ListResources(ctx, req.(*ListResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Leases_serviceDesc = grpc.ServiceDesc{ ServiceName: "containerd.services.leases.v1.Leases", HandlerType: (*LeasesServer)(nil), @@ -474,6 +781,18 @@ var _Leases_serviceDesc = grpc.ServiceDesc{ MethodName: "List", Handler: _Leases_List_Handler, }, + { + MethodName: "AddResource", + Handler: _Leases_AddResource_Handler, + }, + { + MethodName: "DeleteResource", + Handler: _Leases_DeleteResource_Handler, + }, + { + MethodName: "ListResources", + Handler: _Leases_ListResources_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "github.com/containerd/containerd/api/services/leases/v1/leases.proto", @@ -712,61 +1031,224 @@ func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return offset + 1 + return dAtA[:n], nil } -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovLeases(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + 
sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } + if len(m.Type) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *CreateRequest) Size() (n int) { - if m == nil { - return 0 +func (m *AddResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *AddResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + dAtA[i] = 0x12 + i++ + i = encodeVarintLeases(dAtA, i, uint64(m.Resource.Size())) + n3, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeleteResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = 
encodeVarintLeases(dAtA, i, uint64(m.Resource.Size())) + n4, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0xa + i++ + i = encodeVarintLeases(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovLeases(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + 
len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) + n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) + n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) } return n } @@ -842,6 +1324,96 @@ func (m *ListResponse) Size() (n int) { return n } +func (m *Resource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AddResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = m.Resource.Size() + n += 1 + l + sovLeases(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + l = m.Resource.Size() + n += 1 + l + sovLeases(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLeases(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListResourcesResponse) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovLeases(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovLeases(x uint64) (n int) { for { n++ @@ -945,29 +1517,87 @@ func (this *ListResponse) String() string { }, "") return s } -func valueToStringLeases(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { +func (this *Resource) String() string { + if this == nil { return "nil" } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + s := strings.Join([]string{`&Resource{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s } -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ +func (this *AddResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AddResourceRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteResourceRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + 
return s +} +func (this *ListResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListResourcesRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListResourcesResponse{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringLeases(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break @@ -1172,7 +1802,590 @@ func (m *Lease) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Labels[mapkey] = mapvalue + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthLeases + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthLeases + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lease == nil { + m.Lease = &Lease{} + } + if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = 
append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, &Lease{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1199,7 +2412,7 @@ func (m *Lease) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *CreateRequest) Unmarshal(dAtA []byte) error { +func (m *Resource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1222,10 +2435,10 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Resource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1260,11 +2473,11 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeases @@ -1274,118 +2487,23 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthLeases } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthLeases } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ 
- wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1412,7 +2530,7 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *CreateResponse) 
Unmarshal(dAtA []byte) error { +func (m *AddResourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1435,15 +2553,47 @@ func (m *CreateResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: AddResourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AddResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1470,10 +2620,7 @@ func (m *CreateResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lease == nil { - m.Lease = &Lease{} - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1502,7 +2649,7 @@ func (m *CreateResponse) Unmarshal(dAtA 
[]byte) error { } return nil } -func (m *DeleteRequest) Unmarshal(dAtA []byte) error { +func (m *DeleteResourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1525,10 +2672,10 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteResourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1564,10 +2711,10 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeases @@ -1577,12 +2724,25 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Sync = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthLeases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLeases(dAtA[iNdEx:]) @@ -1608,7 +2768,7 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListRequest) Unmarshal(dAtA []byte) error { +func (m *ListResourcesRequest) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1631,15 +2791,15 @@ func (m *ListRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ListResourcesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1667,7 +2827,7 @@ func (m *ListRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1694,7 +2854,7 @@ func (m *ListRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListResponse) Unmarshal(dAtA []byte) error { +func (m *ListResourcesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1717,15 +2877,15 @@ func (m *ListResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ListResourcesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1752,8 +2912,8 @@ func (m *ListResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Leases = append(m.Leases, &Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto index 2df4b06239bc9..ac693e93ded32 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto @@ -22,6 +22,15 @@ service Leases { // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. rpc List(ListRequest) returns (ListResponse); + + // AddResource references the resource by the provided lease. + rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty); + + // DeleteResource dereferences the resource by the provided lease. + rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty); + + // ListResources lists all the resources referenced by the lease. + rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse); } // Lease is an object which retains resources while it exists. @@ -62,3 +71,32 @@ message ListRequest { message ListResponse { repeated Lease leases = 1; } + +message Resource { + string id = 1; + + // For snapshotter resource, there are many snapshotter types here, like + // overlayfs, devmapper etc. The type will be formatted with type, + // like "snapshotter/overlayfs". 
+ string type = 2; +} + +message AddResourceRequest { + string id = 1; + + Resource resource = 2 [(gogoproto.nullable) = false]; +} + +message DeleteResourceRequest { + string id = 1; + + Resource resource = 2 [(gogoproto.nullable) = false]; +} + +message ListResourcesRequest { + string id = 1; +} + +message ListResourcesResponse { + repeated Resource resources = 1 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index 60c80e98a5954..2338de6b90362 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { switch compression { case Uncompressed: diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index fd8d98bf30935..e05ca719c2e6e 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error { utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrap(err, "failed call to UtimesNanoAt") + return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path) } return nil diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 133bfcdbe8ee5..c7cf4f0bcb9b8 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -18,10 +18,13 @@ package cio import ( "context" + "errors" "fmt" "io" "net/url" "os" + "path/filepath" + "strings" "sync" "github.com/containerd/containerd/defaults" @@ -242,17 +245,24 @@ func LogURI(uri *url.URL) Creator { // BinaryIO forwards container STDOUT|STDERR directly to a logging binary func BinaryIO(binary string, args map[string]string) Creator { return func(_ string) (IO, error) { + binary = filepath.Clean(binary) + if !strings.HasPrefix(binary, "/") { + return nil, errors.New("absolute path needed") + } uri := &url.URL{ Scheme: "binary", - Host: binary, + Path: binary, } + q := uri.Query() for k, v := range args { - uri.Query().Set(k, v) + q.Set(k, v) } + uri.RawQuery = q.Encode() + res := uri.String() return &logURI{ config: Config{ - Stdout: uri.String(), - Stderr: uri.String(), + Stdout: res, + Stderr: res, }, }, nil } @@ -262,14 +272,19 @@ func BinaryIO(binary string, args 
map[string]string) Creator { // If the log file already exists, the logs will be appended to the file. func LogFile(path string) Creator { return func(_ string) (IO, error) { + path = filepath.Clean(path) + if !strings.HasPrefix(path, "/") { + return nil, errors.New("absolute path needed") + } uri := &url.URL{ Scheme: "file", - Host: path, + Path: path, } + res := uri.String() return &logURI{ config: Config{ - Stdout: uri.String(), - Stderr: uri.String(), + Stdout: res, + Stderr: res, }, }, nil } diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index ff78f7e776437..8ea7d79729ff5 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -43,6 +43,7 @@ import ( "github.com/containerd/containerd/content" contentproxy "github.com/containerd/containerd/content/proxy" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" @@ -56,6 +57,7 @@ import ( "github.com/containerd/containerd/snapshots" snproxy "github.com/containerd/containerd/snapshots/proxy" "github.com/containerd/typeurl" + "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -86,13 +88,17 @@ func New(address string, opts ...ClientOpt) (*Client, error) { if copts.timeout == 0 { copts.timeout = 10 * time.Second } - rt := fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS) - if copts.defaultRuntime != "" { - rt = copts.defaultRuntime - } + c := &Client{ - runtime: rt, + defaultns: copts.defaultns, } + + if copts.defaultRuntime != "" { + c.runtime = copts.defaultRuntime + } else { + c.runtime = defaults.DefaultRuntime + } + if copts.services != nil { c.services = *copts.services } @@ 
-102,7 +108,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) { grpc.WithInsecure(), grpc.FailOnNonTempDialError(true), grpc.WithBackoffMaxDelay(3 * time.Second), - grpc.WithDialer(dialer.Dialer), + grpc.WithContextDialer(dialer.ContextDialer), // TODO(stevvooe): We may need to allow configuration of this on the client. grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), @@ -134,19 +140,15 @@ func New(address string, opts ...ClientOpt) (*Client, error) { c.conn, c.connector = conn, connector } if copts.services == nil && c.conn == nil { - return nil, errors.New("no grpc connection or services is available") + return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available") } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - namespaces := c.NamespaceService() - ctx := context.Background() - if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil { - if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok { - c.runtime = defaultRuntime - } - } else { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err + } else if label != "" { + c.runtime = label } } @@ -163,20 +165,17 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { } } c := &Client{ - conn: conn, - runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), + defaultns: copts.defaultns, + conn: conn, + runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - namespaces := c.NamespaceService() - ctx := context.Background() - if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil { - if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok { - c.runtime = 
defaultRuntime - } - } else { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err + } else if label != "" { + c.runtime = label } } @@ -193,13 +192,14 @@ type Client struct { connMu sync.Mutex conn *grpc.ClientConn runtime string + defaultns string connector func() (*grpc.ClientConn, error) } // Reconnect re-establishes the GRPC connection to the containerd daemon func (c *Client) Reconnect() error { if c.connector == nil { - return errors.New("unable to reconnect to containerd, no connector available") + return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available") } c.connMu.Lock() defer c.connMu.Unlock() @@ -222,10 +222,10 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return false, errors.New("no grpc connection available") + return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() - r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false)) + r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true)) if err != nil { return false, err } @@ -298,6 +298,9 @@ type RemoteContext struct { // afterwards. Unpacking is required to run an image. Unpack bool + // UnpackOpts handles options to the unpack call. 
+ UnpackOpts []UnpackOpt + // Snapshotter used for unpacking Snapshotter string @@ -339,7 +342,6 @@ func defaultRemoteContext() *RemoteContext { Resolver: docker.NewResolver(docker.ResolverOptions{ Client: http.DefaultClient, }), - Snapshotter: DefaultSnapshotter, } } @@ -354,7 +356,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag } if fetchCtx.Unpack { - return images.Image{}, errors.New("unpack on fetch not supported, try pull") + return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull") } if fetchCtx.PlatformMatcher == nil { @@ -407,6 +409,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, } } + // Annotate ref with digest to push only push tag for single digest + if !strings.Contains(ref, "@") { + ref = ref + "@" + desc.Digest.String() + } + pusher, err := pushCtx.Resolver.Pusher(ctx, ref) if err != nil { return err @@ -490,6 +497,27 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels)) } +// GetLabel gets a label value from namespace store +// If there is no default label, an empty string returned with nil error +func (c *Client) GetLabel(ctx context.Context, label string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + if c.defaultns == "" { + return "", err + } + ns = c.defaultns + } + + srv := c.NamespaceService() + labels, err := srv.Labels(ctx, ns) + if err != nil { + return "", err + } + + value := labels[label] + return value, nil +} + // Subscribe to events that match one or more of the provided filters. // // Callers should listen on both the envelope and errs channels. 
If the errs @@ -543,6 +571,10 @@ func (c *Client) ContentStore() content.Store { // SnapshotService returns the underlying snapshotter for the provided snapshotter name func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName) + if err != nil { + snapshotterName = DefaultSnapshotter + } if c.snapshotters != nil { return c.snapshotters[snapshotterName] } @@ -642,7 +674,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return Version{}, errors.New("no grpc connection available") + return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) @@ -655,6 +687,58 @@ func (c *Client) Version(ctx context.Context) (Version, error) { }, nil } +type ServerInfo struct { + UUID string +} + +func (c *Client) Server(ctx context.Context) (ServerInfo, error) { + c.connMu.Lock() + if c.conn == nil { + c.connMu.Unlock() + return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + } + c.connMu.Unlock() + + response, err := c.IntrospectionService().Server(ctx, &types.Empty{}) + if err != nil { + return ServerInfo{}, err + } + return ServerInfo{ + UUID: response.UUID, + }, nil +} + +func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) { + if name == "" { + label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel) + if err != nil { + return "", err + } + + if label != "" { + name = label + } else { + name = DefaultSnapshotter + } + } + + return name, nil +} + +func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Snapshotter, error) { + name, err := c.resolveSnapshotterName(ctx, name) + if err != nil { + return nil, err + } + + s := c.SnapshotService(name) + if s == nil { + return nil, 
errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name) + } + + return s, nil +} + // CheckRuntime returns true if the current runtime matches the expected // runtime. Providing various parts of the runtime schema will match those // parts of the expected runtime diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 2073d40b45d6e..46d51ecd919a3 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -49,7 +49,7 @@ type Container interface { // ID identifies the container ID() string // Info returns the underlying container record type - Info(context.Context) (containers.Container, error) + Info(context.Context, ...InfoOpts) (containers.Container, error) // Delete removes the container Delete(context.Context, ...DeleteOpts) error // NewTask creates a new task based on the container metadata @@ -80,16 +80,18 @@ type Container interface { func containerFromRecord(client *Client, c containers.Container) *container { return &container{ - client: client, - id: c.ID, + client: client, + id: c.ID, + metadata: c, } } var _ = (Container)(&container{}) type container struct { - client *Client - id string + client *Client + id string + metadata containers.Container } // ID returns the container's unique id @@ -97,8 +99,22 @@ func (c *container) ID() string { return c.id } -func (c *container) Info(ctx context.Context) (containers.Container, error) { - return c.get(ctx) +func (c *container) Info(ctx context.Context, opts ...InfoOpts) (containers.Container, error) { + i := &InfoConfig{ + // default to refreshing the container's local metadata + Refresh: true, + } + for _, o := range opts { + o(i) + } + if i.Refresh { + metadata, err := c.get(ctx) + if err != nil { + return c.metadata, err + } + c.metadata = metadata + } + return c.metadata, nil } func (c *container) Extensions(ctx context.Context) 
(map[string]prototypes.Any, error) { @@ -217,7 +233,11 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N } // get the rootfs from the snapshotter and add it to the request - mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey) + s, err := c.client.getSnapshotter(ctx, r.Snapshotter) + if err != nil { + return nil, err + } + mounts, err := s.Mounts(ctx, r.SnapshotKey) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index 1ce989432634b..e36b47e2e8098 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -20,9 +20,7 @@ import ( "context" "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" @@ -41,6 +39,15 @@ type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Co // UpdateContainerOpts allows the caller to set additional options when updating a container type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error +// InfoOpts controls how container metadata is fetched and returned +type InfoOpts func(*InfoConfig) + +// InfoConfig specifies how container metadata is fetched +type InfoConfig struct { + // Refresh will to a fetch of the latest container metadata + Refresh bool +} + // WithRuntime allows a user to specify the runtime name and additional options that should // be used to create tasks for the container func WithRuntime(name string, options interface{}) NewContainerOpts { @@ -109,9 +116,17 @@ func WithSnapshotter(name string) NewContainerOpts { // WithSnapshot uses an existing root 
filesystem for the container func WithSnapshot(id string) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - setSnapshotterIfEmpty(ctx, client, c) // check that the snapshot exists, if not, fail on creation - if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil { + var err error + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.Mounts(ctx, id); err != nil { return err } c.SnapshotKey = id @@ -123,13 +138,21 @@ func WithSnapshot(id string) NewContainerOpts { // root filesystem in read-write mode func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.RootFS(ctx) if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) + parent := identity.ChainID(diffIDs).String() - if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil { + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.Prepare(ctx, id, parent, opts...); err != nil { return err } c.SnapshotKey = id @@ -144,7 +167,13 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta if c.Snapshotter == "" { return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot") } - return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey) + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if err := s.Remove(ctx, c.SnapshotKey); err != nil && 
!errdefs.IsNotFound(err) { + return err + } } return nil } @@ -157,9 +186,17 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) + parent := identity.ChainID(diffIDs).String() - if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil { + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.View(ctx, id, parent, opts...); err != nil { return err } c.SnapshotKey = id @@ -168,21 +205,6 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer } } -func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) { - if c.Snapshotter == "" { - defaultSnapshotter := DefaultSnapshotter - namespaceService := client.NamespaceService() - if ns, err := namespaces.NamespaceRequired(ctx); err == nil { - if labels, err := namespaceService.Labels(ctx, ns); err == nil { - if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok { - defaultSnapshotter = snapshotLabel - } - } - } - c.Snapshotter = defaultSnapshotter - } -} - // WithContainerExtension appends extension data to the container object. // Use this to decorate the container object with additional data for the client // integration. 
@@ -235,3 +257,8 @@ func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts { return err } } + +// WithoutRefreshedMetadata will use the current metadata attached to the container object +func WithoutRefreshedMetadata(i *InfoConfig) { + i.Refresh = false +} diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index 340a9185728c2..af52d042209c6 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -50,13 +50,18 @@ func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool return err } - setSnapshotterIfEmpty(ctx, client, c) - var ( - snapshotter = client.SnapshotService(c.Snapshotter) - parent = identity.ChainID(diffIDs).String() - usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) + parent = identity.ChainID(diffIDs).String() + usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) ) + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + snapshotter, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } if _, err := snapshotter.Stat(ctx, usernsID); err == nil { if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil { c.SnapshotKey = id diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 3e231408d55ec..c1c2046186a82 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -55,7 +55,14 @@ func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ( p := make([]byte, ra.Size()) - _, err = ra.ReadAt(p, 0) + n, err := ra.ReadAt(p, 0) + if err == io.EOF { + if int64(n) != ra.Size() { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } return p, err } @@ -162,6 +169,28 @@ func 
CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { return err } +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + return copyWithBuffer(cw, r) +} + // seekReader attempts to seek the reader to the given offset, either by // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding // up to the given offset. diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index 5503cb56f91ad..efc58ea79ec2c 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -35,7 +35,6 @@ import ( "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" - "github.com/containerd/continuity" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -661,6 +660,19 @@ func writeTimestampFile(p string, t time.Time) error { if err != nil { return err } + return atomicWrite(p, b, 0666) +} - return continuity.AtomicWriteFile(p, b, 0666) +func atomicWrite(path string, data []byte, mode os.FileMode) error { + tmp := fmt.Sprintf("%s.tmp", path) + f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) + if err != nil { + return errors.Wrap(err, "create tmp file") + } + _, err = f.Write(data) + f.Close() + if err != nil { + return errors.Wrap(err, "write atomic 
data") + } + return os.Rename(tmp, path) } diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go index 011139d169990..042052792e606 100644 --- a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go @@ -20,7 +20,8 @@ package seccomp import ( "runtime" - "syscall" + + "golang.org/x/sys/unix" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -555,7 +556,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Args: []specs.LinuxSeccompArg{ { Index: 1, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: specs.OpMaskedEqual, }, @@ -570,7 +571,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { Args: []specs.LinuxSeccompArg{ { Index: 0, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, ValueTwo: 0, Op: specs.OpMaskedEqual, }, diff --git a/vendor/github.com/containerd/containerd/defaults/defaults.go b/vendor/github.com/containerd/containerd/defaults/defaults.go index 3a748e4e80e20..6f5b122ecf936 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults.go @@ -23,10 +23,10 @@ const ( // DefaultMaxSendMsgSize defines the default maximum message size for // sending protobufs passed over the GRPC API. 
DefaultMaxSendMsgSize = 16 << 20 - // DefaultRuntimeNSLabel defines the namespace label to check for + // DefaultRuntimeNSLabel defines the namespace label to check for the // default runtime DefaultRuntimeNSLabel = "containerd.io/defaults/runtime" - // DefaultSnapshotterNSLabel defines the namespances label to check for + // DefaultSnapshotterNSLabel defines the namespace label to check for the // default snapshotter DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 30ed42235ef0c..319e8777bf869 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -32,4 +32,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. DefaultFIFODir = "/run/containerd/fifo" + // DefaultRuntime is the default linux runtime + DefaultRuntime = "io.containerd.runc.v2" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index 16f1048ca6ff3..5eede8de83b81 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -40,4 +40,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. Unused on Windows. 
DefaultFIFODir = "" + // DefaultRuntime is the default windows runtime + DefaultRuntime = "io.containerd.runhcs.v1" ) diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 4d890ce2b969a..445df019220c8 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -45,10 +45,17 @@ type diffRemote struct { client diffapi.DiffClient } -func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) { +func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { + var config diff.ApplyConfig + for _, opt := range opts { + if err := opt(ctx, desc, &config); err != nil { + return ocispec.Descriptor{}, err + } + } req := &diffapi.ApplyRequest{ - Diff: fromDescriptor(diff), - Mounts: fromMounts(mounts), + Diff: fromDescriptor(desc), + Mounts: fromMounts(mounts), + Payloads: config.ProcessorPayloads, } resp, err := r.client.Apply(ctx, req) if err != nil { diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply.go b/vendor/github.com/containerd/containerd/diff/apply/apply.go index d5b4ff45d39ab..7a6b65c3e0eba 100644 --- a/vendor/github.com/containerd/containerd/diff/apply/apply.go +++ b/vendor/github.com/containerd/containerd/diff/apply/apply.go @@ -23,11 +23,8 @@ import ( "time" "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" digest "github.com/opencontainers/go-digest" @@ -53,7 +50,7 @@ var emptyDesc = ocispec.Descriptor{} // Apply applies the content associated with the provided digests onto the // 
provided mounts. Archive content will be extracted and decompressed if // necessary. -func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (d ocispec.Descriptor, err error) { +func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { t1 := time.Now() defer func() { if err == nil { @@ -66,54 +63,63 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ } }() - isCompressed, err := images.IsCompressedDiff(ctx, desc.MediaType) - if err != nil { - return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) - } - - var ocidesc ocispec.Descriptor - if err := mount.WithTempMount(ctx, mounts, func(root string) error { - ra, err := s.store.ReaderAt(ctx, desc) - if err != nil { - return errors.Wrap(err, "failed to get reader from content store") + var config diff.ApplyConfig + for _, o := range opts { + if err := o(ctx, desc, &config); err != nil { + return emptyDesc, errors.Wrap(err, "failed to apply config opt") } - defer ra.Close() + } - r := content.NewReader(ra) - if isCompressed { - ds, err := compression.DecompressStream(r) - if err != nil { - return err - } - defer ds.Close() - r = ds + ra, err := s.store.ReaderAt(ctx, desc) + if err != nil { + return emptyDesc, errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + var processors []diff.StreamProcessor + processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) + processors = append(processors, processor) + for { + if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { + return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType) } - - digester := digest.Canonical.Digester() - rc := &readCounter{ - r: io.TeeReader(r, digester.Hash()), + processors = append(processors, processor) + if 
processor.MediaType() == ocispec.MediaTypeImageLayer { + break } + } + defer processor.Close() + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(processor, digester.Hash()), + } + if err := mount.WithTempMount(ctx, mounts, func(root string) error { if _, err := archive.Apply(ctx, root, rc); err != nil { return err } // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { - return err - } - - ocidesc = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageLayer, - Size: rc.c, - Digest: digester.Digest(), - } - return nil - + _, err := io.Copy(ioutil.Discard, rc) + return err }); err != nil { return emptyDesc, err } - return ocidesc, nil + + for _, p := range processors { + if ep, ok := p.(interface { + Err() error + }); ok { + if err := ep.Err(); err != nil { + return emptyDesc, err + } + } + } + return ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + }, nil } type readCounter struct { diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go index 2b6f01c74efb7..17aab616e543d 100644 --- a/vendor/github.com/containerd/containerd/diff/diff.go +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -20,6 +20,7 @@ import ( "context" "github.com/containerd/containerd/mount" + "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -51,6 +52,15 @@ type Comparer interface { Compare(ctx context.Context, lower, upper []mount.Mount, opts ...Opt) (ocispec.Descriptor, error) } +// ApplyConfig is used to hold parameters needed for a apply operation +type ApplyConfig struct { + // ProcessorPayloads specifies the payload sent to various processors + ProcessorPayloads map[string]*types.Any +} + +// ApplyOpt is used to configure an Apply operation +type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error + // Applier allows applying diffs 
between mounts type Applier interface { // Apply applies the content referred to by the given descriptor to @@ -58,7 +68,7 @@ type Applier interface { // implementation and content descriptor. For example, in the common // case the descriptor is a file system difference in tar format, // that tar would be applied on top of the mounts. - Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount) (ocispec.Descriptor, error) + Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error) } // WithMediaType sets the media type to use for creating the diff, without @@ -87,3 +97,11 @@ func WithLabels(labels map[string]string) Opt { return nil } } + +// WithPayloads sets the apply processor payloads to the config +func WithPayloads(payloads map[string]*types.Any) ApplyOpt { + return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error { + c.ProcessorPayloads = payloads + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/diff/stream.go b/vendor/github.com/containerd/containerd/diff/stream.go new file mode 100644 index 0000000000000..4b8f27f14979a --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream.go @@ -0,0 +1,187 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package diff + +import ( + "context" + "io" + "os" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/images" + "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + handlers []Handler + + // ErrNoProcessor is returned when no stream processor is available for a media-type + ErrNoProcessor = errors.New("no processor for media-type") +) + +func init() { + // register the default compression handler + RegisterProcessor(compressedHandler) +} + +// RegisterProcessor registers a stream processor for media-types +func RegisterProcessor(handler Handler) { + handlers = append(handlers, handler) +} + +// GetProcessor returns the processor for a media-type +func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + // reverse this list so that user configured handlers come up first + for i := len(handlers) - 1; i >= 0; i-- { + processor, ok := handlers[i](ctx, stream.MediaType()) + if ok { + return processor(ctx, stream, payloads) + } + } + return nil, ErrNoProcessor +} + +// Handler checks a media-type and initializes the processor +type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) + +// StaticHandler returns the processor init func for a static media-type +func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { + return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + if mediaType == expectedMediaType { + return fn, true + } + return nil, false + } +} + +// StreamProcessorInit returns the initialized stream processor +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) + +// RawProcessor provides access to direct fd for processing +type RawProcessor interface { + // File returns the fd for the read stream of the 
underlying processor + File() *os.File +} + +// StreamProcessor handles processing a content stream and transforming it into a different media-type +type StreamProcessor interface { + io.ReadCloser + + // MediaType is the resulting media-type that the processor processes the stream into + MediaType() string +} + +func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + compressed, err := images.IsCompressedDiff(ctx, mediaType) + if err != nil { + return nil, false + } + if compressed { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + ds, err := compression.DecompressStream(stream) + if err != nil { + return nil, err + } + + return &compressedProcessor{ + rc: ds, + }, nil + }, true + } + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return &stdProcessor{ + rc: stream, + }, nil + }, true +} + +// NewProcessorChain initialized the root StreamProcessor +func NewProcessorChain(mt string, r io.Reader) StreamProcessor { + return &processorChain{ + mt: mt, + rc: r, + } +} + +type processorChain struct { + mt string + rc io.Reader +} + +func (c *processorChain) MediaType() string { + return c.mt +} + +func (c *processorChain) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *processorChain) Close() error { + return nil +} + +type stdProcessor struct { + rc StreamProcessor +} + +func (c *stdProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *stdProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *stdProcessor) Close() error { + return nil +} + +type compressedProcessor struct { + rc io.ReadCloser +} + +func (c *compressedProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *compressedProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *compressedProcessor) Close() error { 
+ return c.rc.Close() +} + +func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler { + set := make(map[string]struct{}, len(mediaTypes)) + for _, m := range mediaTypes { + set[m] = struct{}{} + } + return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { + if _, ok := set[mediaType]; ok { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + payload := payloads[id] + return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload) + }, true + } + return nil, false + } +} + +const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE" diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go new file mode 100644 index 0000000000000..28f38d998a806 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -0,0 +1,146 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) + cmd.Env = os.Environ() + + var payloadC io.Closer + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + go func() { + io.Copy(w, bytes.NewReader(data)) + w.Close() + }() + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + payloadC = r + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + if payloadC != nil { + payloadC.Close() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + 
+func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go new file mode 100644 index 0000000000000..8dadd72c92c29 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -0,0 +1,165 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + winio "github.com/Microsoft/go-winio" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const processorPipe = "STREAM_PROCESSOR_PIPE" + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + up, err := getUiqPath() + if err != nil { + return nil, err + } + path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up) + l, err := winio.ListenPipe(path, nil) + if err != nil { + return nil, err + } + go func() { + defer l.Close() + conn, err := l.Accept() + if err != nil { + logrus.WithError(err).Error("accept npipe connection") + return + } + io.Copy(conn, bytes.NewReader(data)) + conn.Close() + }() + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path)) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + 
if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} + +func getUiqPath() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + os.Remove(dir) + return filepath.Base(dir), nil +} diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go index a45a5630b5347..5ce35910c7033 100644 --- a/vendor/github.com/containerd/containerd/diff/walking/differ.go +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -106,14 +106,15 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o } }() if !newReference { - if err := cw.Truncate(0); err != nil { + if err = cw.Truncate(0); err != nil { return err } } if isCompressed { dgstr := digest.SHA256.Digester() - compressed, err := compression.CompressStream(cw, compression.Gzip) + var compressed io.WriteCloser + compressed, err = compression.CompressStream(cw, compression.Gzip) if err != nil { return errors.Wrap(err, "failed to get compressed stream") } @@ -149,7 +150,9 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if err != nil { return errors.Wrap(err, "failed to get info from content store") } - + if info.Labels == nil { + info.Labels = make(map[string]string) + } // Set uncompressed label if digest already existed without label if _, ok := info.Labels[uncompressed]; !ok { info.Labels[uncompressed] = config.Labels[uncompressed] diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 40427fc5a54e7..b5200afc0eee6 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -26,7 +26,11 @@ // client-side errors to the correct types. 
package errdefs -import "github.com/pkg/errors" +import ( + "context" + + "github.com/pkg/errors" +) // Definitions of common error types used throughout containerd. All containerd // errors returned by most packages will map into one of these errors classes. @@ -76,3 +80,14 @@ func IsUnavailable(err error) bool { func IsNotImplemented(err error) bool { return errors.Cause(err) == ErrNotImplemented } + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Cause(err) == context.Canceled +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Cause(err) == context.DeadlineExceeded +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index b1542f13d6abf..209f63bd0fc0d 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -17,6 +17,7 @@ package errdefs import ( + "context" "strings" "github.com/pkg/errors" @@ -55,6 +56,10 @@ func ToGRPC(err error) error { return status.Errorf(codes.Unavailable, err.Error()) case IsNotImplemented(err): return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) } return err @@ -89,6 +94,10 @@ func FromGRPC(err error) error { cls = ErrFailedPrecondition case codes.Unimplemented: cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded default: cls = ErrUnknown } diff --git a/vendor/github.com/containerd/containerd/export.go b/vendor/github.com/containerd/containerd/export.go index f5552231ee915..81f199226d4ae 100644 --- a/vendor/github.com/containerd/containerd/export.go +++ 
b/vendor/github.com/containerd/containerd/export.go @@ -20,26 +20,12 @@ import ( "context" "io" - "github.com/containerd/containerd/images/oci" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" + "github.com/containerd/containerd/images/archive" ) -// Export exports an image to a Tar stream. -// OCI format is used by default. -// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc. -// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream. -func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...oci.V1ExporterOpt) (io.ReadCloser, error) { - - exporter, err := oci.ResolveV1ExportOpt(opts...) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed")) - }() - return pr, nil +// Export exports images to a Tar stream. +// The tar archive is in OCI format with a Docker compatible manifest +// when a single target platform is given. +func (c *Client) Export(ctx context.Context, w io.Writer, opts ...archive.ExportOpt) error { + return archive.Export(ctx, c.ContentStore(), w, opts...) } diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go index 35a1712cb3e4b..c6fcf79103cd1 100644 --- a/vendor/github.com/containerd/containerd/gc/gc.go +++ b/vendor/github.com/containerd/containerd/gc/gc.go @@ -30,6 +30,11 @@ import ( // ResourceType represents type of resource at a node type ResourceType uint8 +// ResourceMax represents the max resource. +// Upper bits are stripped out during the mark phase, allowing the upper 3 bits +// to be used by the caller reference function. +const ResourceMax = ResourceType(0x1F) + // Node presents a resource which has a type and key, // this node can be used to lookup other nodes. 
type Node struct { @@ -80,6 +85,8 @@ func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struc } } + // strip bits above max resource type + id.Type = id.Type & ResourceMax // mark as black when done reachable[id] = struct{}{} } diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 14bfea91b9e1a..9cfc03a30c483 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -21,11 +21,13 @@ import ( "fmt" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" - digest "github.com/opencontainers/go-digest" + "github.com/containerd/containerd/snapshots" + "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -40,7 +42,7 @@ type Image interface { // Labels of the image Labels() map[string]string // Unpack unpacks the image's content into a snapshot - Unpack(context.Context, string) error + Unpack(context.Context, string, ...UnpackOpt) error // RootFS returns the unpacked diffids that make up images rootfs. RootFS(ctx context.Context) ([]digest.Digest, error) // Size returns the total size of the image's packed resources. 
@@ -108,7 +110,10 @@ func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) { } func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, error) { - sn := i.client.SnapshotService(snapshotterName) + sn, err := i.client.getSnapshotter(ctx, snapshotterName) + if err != nil { + return false, err + } cs := i.client.ContentStore() diffs, err := i.i.RootFS(ctx, cs, i.platform) @@ -127,28 +132,53 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e return false, nil } -func (i *image) Unpack(ctx context.Context, snapshotterName string) error { +// UnpackConfig provides configuration for the unpack of an image +type UnpackConfig struct { + // ApplyOpts for applying a diff to a snapshotter + ApplyOpts []diff.ApplyOpt + // SnapshotOpts for configuring a snapshotter + SnapshotOpts []snapshots.Opt +} + +// UnpackOpt provides configuration for unpack +type UnpackOpt func(context.Context, *UnpackConfig) error + +func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error { ctx, done, err := i.client.WithLease(ctx) if err != nil { return err } defer done(ctx) + var config UnpackConfig + for _, o := range opts { + if err := o(ctx, &config); err != nil { + return err + } + } + layers, err := i.getLayers(ctx, i.platform) if err != nil { return err } var ( - sn = i.client.SnapshotService(snapshotterName) a = i.client.DiffService() cs = i.client.ContentStore() chain []digest.Digest unpacked bool ) + snapshotterName, err = i.client.resolveSnapshotterName(ctx, snapshotterName) + if err != nil { + return err + } + sn, err := i.client.getSnapshotter(ctx, snapshotterName) + if err != nil { + return err + } for _, layer := range layers { - unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a) + unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts) if err != nil { return err } diff --git 
a/vendor/github.com/containerd/continuity/proto/gen.go b/vendor/github.com/containerd/containerd/images/annotations.go similarity index 73% rename from vendor/github.com/containerd/continuity/proto/gen.go rename to vendor/github.com/containerd/containerd/images/annotations.go index 63ce10fb53120..47d92104cddc6 100644 --- a/vendor/github.com/containerd/continuity/proto/gen.go +++ b/vendor/github.com/containerd/containerd/images/annotations.go @@ -14,6 +14,10 @@ limitations under the License. */ -package proto +package images -//go:generate protoc --go_out=. manifest.proto +const ( + // AnnotationImageName is an annotation on a Descriptor in an index.json + // containing the `Name` value as used by an `Image` struct + AnnotationImageName = "io.containerd.image.name" +) diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go new file mode 100644 index 0000000000000..244ef322450a8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -0,0 +1,468 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package archive + +import ( + "archive/tar" + "context" + "encoding/json" + "io" + "path" + "sort" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type exportOptions struct { + manifests []ocispec.Descriptor + platform platforms.MatchComparer + allPlatforms bool + skipDockerManifest bool +} + +// ExportOpt defines options for configuring exported descriptors +type ExportOpt func(context.Context, *exportOptions) error + +// WithPlatform defines the platform to require manifest lists have +// not exporting all platforms. +// Additionally, platform is used to resolve image configs for +// Docker v1.1, v1.2 format compatibility. +func WithPlatform(p platforms.MatchComparer) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + o.platform = p + return nil + } +} + +// WithAllPlatforms exports all manifests from a manifest list. +// Missing content will fail the export. +func WithAllPlatforms() ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + o.allPlatforms = true + return nil + } +} + +// WithSkipDockerManifest skips creation of the Docker compatible +// manifest.json file. +func WithSkipDockerManifest() ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + o.skipDockerManifest = true + return nil + } +} + +// WithImage adds the provided images to the exported archive. 
+func WithImage(is images.Store, name string) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + img, err := is.Get(ctx, name) + if err != nil { + return err + } + + img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations) + o.manifests = append(o.manifests, img.Target) + + return nil + } +} + +// WithManifest adds a manifest to the exported archive. +// When names are given they will be set on the manifest in the +// exported archive, creating an index record for each name. +// When no names are provided, it is up to caller to put name annotation to +// on the manifest descriptor if needed. +func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + if len(names) == 0 { + o.manifests = append(o.manifests, manifest) + } + for _, name := range names { + mc := manifest + mc.Annotations = addNameAnnotation(name, manifest.Annotations) + o.manifests = append(o.manifests, mc) + } + + return nil + } +} + +func addNameAnnotation(name string, base map[string]string) map[string]string { + annotations := map[string]string{} + for k, v := range base { + annotations[k] = v + } + + annotations[images.AnnotationImageName] = name + annotations[ocispec.AnnotationRefName] = ociReferenceName(name) + + return annotations +} + +// Export implements Exporter. 
+func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error { + var eo exportOptions + for _, opt := range opts { + if err := opt(ctx, &eo); err != nil { + return err + } + } + + records := []tarRecord{ + ociLayoutFile(""), + ociIndexRecord(eo.manifests), + } + + algorithms := map[string]struct{}{} + dManifests := map[digest.Digest]*exportManifest{} + resolvedIndex := map[digest.Digest]digest.Digest{} + for _, desc := range eo.manifests { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + mt, ok := dManifests[desc.Digest] + if !ok { + // TODO(containerd): Skip if already added + r, err := getRecords(ctx, store, desc, algorithms) + if err != nil { + return err + } + records = append(records, r...) + + mt = &exportManifest{ + manifest: desc, + } + dManifests[desc.Digest] = mt + } + + name := desc.Annotations[images.AnnotationImageName] + if name != "" && !eo.skipDockerManifest { + mt.names = append(mt.names, name) + } + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + d, ok := resolvedIndex[desc.Digest] + if !ok { + records = append(records, blobRecord(store, desc)) + + p, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return err + } + + var manifests []ocispec.Descriptor + for _, m := range index.Manifests { + if eo.platform != nil { + if m.Platform == nil || eo.platform.Match(*m.Platform) { + manifests = append(manifests, m) + } else if !eo.allPlatforms { + continue + } + } + + r, err := getRecords(ctx, store, m, algorithms) + if err != nil { + return err + } + + records = append(records, r...) 
+ } + + if !eo.skipDockerManifest { + if len(manifests) >= 1 { + if len(manifests) > 1 { + sort.SliceStable(manifests, func(i, j int) bool { + if manifests[i].Platform == nil { + return false + } + if manifests[j].Platform == nil { + return true + } + return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) + }) + } + d = manifests[0].Digest + dManifests[d] = &exportManifest{ + manifest: manifests[0], + } + } else if eo.platform != nil { + return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform") + } + } + resolvedIndex[desc.Digest] = d + } + if d != "" { + if name := desc.Annotations[images.AnnotationImageName]; name != "" { + mt := dManifests[d] + mt.names = append(mt.names, name) + } + + } + default: + return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported") + } + } + + if len(dManifests) > 0 { + tr, err := manifestsRecord(ctx, store, dManifests) + if err != nil { + return errors.Wrap(err, "unable to create manifests file") + } + + records = append(records, tr) + } + + if len(algorithms) > 0 { + records = append(records, directoryRecord("blobs/", 0755)) + for alg := range algorithms { + records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) + } + } + + tw := tar.NewWriter(writer) + defer tw.Close() + return writeTar(ctx, tw, records) +} + +func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}) ([]tarRecord, error) { + var records []tarRecord + exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + records = append(records, blobRecord(store, desc)) + algorithms[desc.Digest.Algorithm().String()] = struct{}{} + return nil, nil + } + + childrenHandler := images.ChildrenHandler(store) + + handlers := images.Handlers( + childrenHandler, + images.HandlerFunc(exportHandler), + ) + + // Walk sequentially since the number of fetchs is likely one and doing in + // parallel requires locking the 
export handler + if err := images.Walk(ctx, handlers, desc); err != nil { + return nil, err + } + + return records, nil +} + +type tarRecord struct { + Header *tar.Header + CopyTo func(context.Context, io.Writer) (int64, error) +} + +func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord { + path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + return tarRecord{ + Header: &tar.Header{ + Name: path, + Mode: 0444, + Size: desc.Size, + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + r, err := cs.ReaderAt(ctx, desc) + if err != nil { + return 0, errors.Wrap(err, "failed to get reader") + } + defer r.Close() + + // Verify digest + dgstr := desc.Digest.Algorithm().Digester() + + n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) + if err != nil { + return 0, errors.Wrap(err, "failed to copy to tar") + } + if dgstr.Digest() != desc.Digest { + return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) + } + return n, nil + }, + } +} + +func directoryRecord(name string, mode int64) tarRecord { + return tarRecord{ + Header: &tar.Header{ + Name: name, + Mode: mode, + Typeflag: tar.TypeDir, + }, + } +} + +func ociLayoutFile(version string) tarRecord { + if version == "" { + version = ocispec.ImageLayoutVersion + } + layout := ocispec.ImageLayout{ + Version: version, + } + + b, err := json.Marshal(layout) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: ocispec.ImageLayoutFile, + Mode: 0444, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } + +} + +func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord { + index := ocispec.Index{ + Versioned: ocispecs.Versioned{ + SchemaVersion: 2, + }, + Manifests: manifests, + } + + b, err := json.Marshal(index) + if err != nil { + panic(err) + } 
+ + return tarRecord{ + Header: &tar.Header{ + Name: "index.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } +} + +type exportManifest struct { + manifest ocispec.Descriptor + names []string +} + +func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) { + mfsts := make([]struct { + Config string + RepoTags []string + Layers []string + }, len(manifests)) + + var i int + for _, m := range manifests { + p, err := content.ReadBlob(ctx, store, m.manifest) + if err != nil { + return tarRecord{}, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return tarRecord{}, err + } + if err := manifest.Config.Digest.Validate(); err != nil { + return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest) + } + + dgst := manifest.Config.Digest + mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded()) + for _, l := range manifest.Layers { + path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded()) + mfsts[i].Layers = append(mfsts[i].Layers, path) + } + + for _, name := range m.names { + nname, err := familiarizeReference(name) + if err != nil { + return tarRecord{}, err + } + + mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname) + } + + i++ + } + + b, err := json.Marshal(mfsts) + if err != nil { + return tarRecord{}, err + } + + return tarRecord{ + Header: &tar.Header{ + Name: "manifest.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + }, nil +} + +func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error { + sort.Slice(records, func(i, j int) bool { + return records[i].Header.Name < 
records[j].Header.Name + }) + + var last string + for _, record := range records { + if record.Header.Name == last { + continue + } + last = record.Header.Name + if err := tw.WriteHeader(record.Header); err != nil { + return err + } + if record.CopyTo != nil { + n, err := record.CopyTo(ctx, tw) + if err != nil { + return err + } + if n != record.Header.Size { + return errors.Errorf("unexpected copy size for %s", record.Header.Name) + } + } else if record.Header.Size > 0 { + return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index 692c76b1ff010..539900685525b 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -22,12 +22,14 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" "path" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" digest "github.com/opencontainers/go-digest" @@ -36,6 +38,22 @@ import ( "github.com/pkg/errors" ) +type importOpts struct { + compress bool +} + +// ImportOpt is an option for importing an OCI index +type ImportOpt func(*importOpts) error + +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(io *importOpts) error { + io.compress = true + return nil + } +} + // ImportIndex imports an index from a tar archive image bundle // - implements Docker v1.1, v1.2 and OCI v1. 
// - prefers OCI v1 when provided @@ -43,8 +61,7 @@ import ( // - normalizes Docker references and adds as OCI ref name // e.g. alpine:latest -> docker.io/library/alpine:latest // - existing OCI reference names are untouched -// - TODO: support option to compress layers on ingest -func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) { +func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { var ( tr = tar.NewReader(reader) @@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } symlinks = make(map[string]string) blobs = make(map[string]ocispec.Descriptor) + iopts importOpts ) + + for _, o := range opts { + if err := o(&iopts); err != nil { + return ocispec.Descriptor{}, err + } + } + for { hdr, err := tr.Next() if err == io.EOF { @@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc if !ok { return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) } - config.MediaType = ocispec.MediaTypeImageConfig + config.MediaType = images.MediaTypeDockerSchema2Config - layers, err := resolveLayers(ctx, store, mfst.Layers, blobs) + layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") } - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: config, - Layers: layers, + manifest := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: config, + Layers: layers, } desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest) @@ -181,7 +210,8 @@ func 
ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } mfstdesc.Annotations = map[string]string{ - ocispec.AnnotationRefName: normalized, + images.AnnotationImageName: normalized, + ocispec.AnnotationRefName: ociReferenceName(normalized), } idx.Manifests = append(idx.Manifests, mfstdesc) @@ -210,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size return dgstr.Digest(), nil } -func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var layers []ocispec.Descriptor - for _, f := range layerFiles { +func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) { + layers := make([]ocispec.Descriptor, len(layerFiles)) + descs := map[digest.Digest]*ocispec.Descriptor{} + filters := []string{} + for i, f := range layerFiles { desc, ok := blobs[f] if !ok { return nil, errors.Errorf("layer %q not found", f) } + layers[i] = desc + descs[desc.Digest] = &layers[i] + filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + } + err := store.Walk(ctx, func(info content.Info) error { + dgst, ok := info.Labels["containerd.io/uncompressed"] + if ok { + desc := descs[digest.Digest(dgst)] + if desc != nil { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + desc.Digest = info.Digest + desc.Size = info.Size + } + } + return nil + }, filters...) 
+ if err != nil { + return nil, errors.Wrap(err, "failure checking for compressed blobs") + } + + for i, desc := range layers { + if desc.MediaType != "" { + continue + } // Open blob, resolve media type ra, err := store.ReaderAt(ctx, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest) + return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest) } s, err := compression.DecompressStream(content.NewReader(ra)) if err != nil { - return nil, errors.Wrapf(err, "failed to detect compression for %q", f) + return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i]) } if s.GetCompression() == compression.Uncompressed { - // TODO: Support compressing and writing back to content store - desc.MediaType = ocispec.MediaTypeImageLayer + if compress { + ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + labels := map[string]string{ + "containerd.io/uncompressed": desc.Digest.String(), + } + layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) + if err != nil { + s.Close() + return nil, err + } + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2Layer + } } else { - desc.MediaType = ocispec.MediaTypeImageLayerGzip + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip } s.Close() - layers = append(layers, desc) } return layers, nil } +func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer") + } + + defer func() { + w.Close() + if err != nil { + cs.Abort(ctx, ref) + } + }() + if err := w.Truncate(0); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer") + } + + cw, 
err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return ocispec.Descriptor{}, err + } + + if _, err := io.Copy(cw, r); err != nil { + return ocispec.Descriptor{}, err + } + if err := cw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + + cst, err := w.Status() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status") + } + + desc.Digest = w.Digest() + desc.Size = cst.Offset + + if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit") + } + } + + return desc, nil +} + func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { manifestBytes, err := json.Marshal(manifest) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go index 2e80a968a0aaa..cd63517e53563 100644 --- a/vendor/github.com/containerd/containerd/images/archive/reference.go +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -19,7 +19,8 @@ package archive import ( "strings" - "github.com/docker/distribution/reference" + "github.com/containerd/containerd/reference" + distref "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -69,7 +70,7 @@ func isImagePrefix(s, prefix string) bool { func normalizeReference(ref string) (string, error) { // TODO: Replace this function to not depend on reference package - normalized, err := reference.ParseDockerRef(ref) + normalized, err := distref.ParseDockerRef(ref) if err != nil { return "", errors.Wrapf(err, "normalize image ref %q", ref) } @@ -77,6 +78,31 @@ func normalizeReference(ref string) (string, error) { return normalized.String(), nil } +func familiarizeReference(ref string) (string, error) { + named, 
err := distref.ParseNormalizedNamed(ref) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %q", ref) + } + named = distref.TagNameOnly(named) + + return distref.FamiliarString(named), nil +} + +func ociReferenceName(name string) string { + // OCI defines the reference name as only a tag excluding the + // repository. The containerd annotation contains the full image name + // since the tag is insufficient for correctly naming and referring to an + // image + var ociRef string + if spec, err := reference.Parse(name); err == nil { + ociRef = spec.Object + } else { + ociRef = name + } + + return ociRef +} + // DigestTranslator creates a digest reference by adding the // digest to an image name func DigestTranslator(prefix string) func(digest.Digest) string { diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index f72684d82946e..7d4f39c0aca85 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -119,7 +119,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor } size += desc.Size return nil, nil - }), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target) + }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) } type platformManifest struct { @@ -142,6 +142,7 @@ type platformManifest struct { // this direction because this abstraction is not needed.` func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { var ( + limit = 1 m []platformManifest wasIndex bool ) @@ -210,10 +211,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } } + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + 
return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + wasIndex = true + if len(descs) > limit { + return descs[:limit], nil + } return descs, nil - } return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) }), image); err != nil { @@ -227,17 +240,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } return ocispec.Manifest{}, err } - - sort.SliceStable(m, func(i, j int) bool { - if m[i].p == nil { - return false - } - if m[j].p == nil { - return true - } - return platform.Less(*m[i].p, *m[j].p) - }) - return *m[0].m, nil } @@ -357,6 +359,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr descs = append(descs, index.Manifests...) case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerEnc, MediaTypeDockerSchema2LayerGzipEnc, MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, @@ -406,3 +409,53 @@ func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) { } return false, nil } + +// GetImageLayerDescriptors gets the image layer Descriptors of an image; the array contains +// a list of Descriptors belonging to one platform followed by lists of other platforms +func GetImageLayerDescriptors(ctx context.Context, cs content.Store, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var lis []ocispec.Descriptor + + ds := platforms.DefaultSpec() + platform := &ds + + switch desc.MediaType { + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex, + MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + children, err := Children(ctx, cs, desc) + if err != nil { + if errdefs.IsNotFound(err) { + return []ocispec.Descriptor{}, nil + } + return []ocispec.Descriptor{}, err + } + + if desc.Platform != nil { + 
platform = desc.Platform + } + + for _, child := range children { + var tmp []ocispec.Descriptor + + switch child.MediaType { + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer, + MediaTypeDockerSchema2LayerGzipEnc, MediaTypeDockerSchema2LayerEnc: + tdesc := child + tdesc.Platform = platform + tmp = append(tmp, tdesc) + default: + tmp, err = GetImageLayerDescriptors(ctx, cs, child) + } + + if err != nil { + return []ocispec.Descriptor{}, err + } + + lis = append(lis, tmp...) + } + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + default: + return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "GetImageLayerInfo: unhandled media type %s", desc.MediaType) + } + return lis, nil +} diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index 186a3b6730395..5fee7467a8aeb 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -22,8 +22,10 @@ package images // here for clarity. 
const ( MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerEnc = "application/vnd.docker.image.rootfs.diff.tar+enc" MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerGzipEnc = "application/vnd.docker.image.rootfs.diff.tar.gzip+enc" MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" diff --git a/vendor/github.com/containerd/containerd/images/oci/exporter.go b/vendor/github.com/containerd/containerd/images/oci/exporter.go deleted file mode 100644 index 8bb5354894c82..0000000000000 --- a/vendor/github.com/containerd/containerd/images/oci/exporter.go +++ /dev/null @@ -1,241 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package oci - -import ( - "archive/tar" - "context" - "encoding/json" - "io" - "sort" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" - ocispecs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// V1Exporter implements OCI Image Spec v1. -// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc. -// -// TODO(AkihiroSuda): add V1Exporter{TranslateMediaTypes: true} that transforms media types, -// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip -// -> application/vnd.oci.image.layer.v1.tar+gzip -type V1Exporter struct { - AllPlatforms bool -} - -// V1ExporterOpt allows the caller to set additional options to a new V1Exporter -type V1ExporterOpt func(c *V1Exporter) error - -// DefaultV1Exporter return a default V1Exporter pointer -func DefaultV1Exporter() *V1Exporter { - return &V1Exporter{ - AllPlatforms: false, - } -} - -// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt -func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) { - exporter := DefaultV1Exporter() - for _, o := range opts { - if err := o(exporter); err != nil { - return exporter, err - } - } - return exporter, nil -} - -// WithAllPlatforms set V1Exporter`s AllPlatforms option -func WithAllPlatforms(allPlatforms bool) V1ExporterOpt { - return func(c *V1Exporter) error { - c.AllPlatforms = allPlatforms - return nil - } -} - -// Export implements Exporter. 
-func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error { - tw := tar.NewWriter(writer) - defer tw.Close() - - records := []tarRecord{ - ociLayoutFile(""), - ociIndexRecord(desc), - } - - algorithms := map[string]struct{}{} - exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - records = append(records, blobRecord(store, desc)) - algorithms[desc.Digest.Algorithm().String()] = struct{}{} - return nil, nil - } - - childrenHandler := images.ChildrenHandler(store) - - if !oe.AllPlatforms { - // get local default platform to fetch image manifest - childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec())) - } - - handlers := images.Handlers( - childrenHandler, - images.HandlerFunc(exportHandler), - ) - - // Walk sequentially since the number of fetchs is likely one and doing in - // parallel requires locking the export handler - if err := images.Walk(ctx, handlers, desc); err != nil { - return err - } - - if len(algorithms) > 0 { - records = append(records, directoryRecord("blobs/", 0755)) - for alg := range algorithms { - records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) - } - } - - return writeTar(ctx, tw, records) -} - -type tarRecord struct { - Header *tar.Header - CopyTo func(context.Context, io.Writer) (int64, error) -} - -func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord { - path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex() - return tarRecord{ - Header: &tar.Header{ - Name: path, - Mode: 0444, - Size: desc.Size, - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - r, err := cs.ReaderAt(ctx, desc) - if err != nil { - return 0, errors.Wrap(err, "failed to get reader") - } - defer r.Close() - - // Verify digest - dgstr := desc.Digest.Algorithm().Digester() - - n, err := io.Copy(io.MultiWriter(w, 
dgstr.Hash()), content.NewReader(r)) - if err != nil { - return 0, errors.Wrap(err, "failed to copy to tar") - } - if dgstr.Digest() != desc.Digest { - return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) - } - return n, nil - }, - } -} - -func directoryRecord(name string, mode int64) tarRecord { - return tarRecord{ - Header: &tar.Header{ - Name: name, - Mode: mode, - Typeflag: tar.TypeDir, - }, - } -} - -func ociLayoutFile(version string) tarRecord { - if version == "" { - version = ocispec.ImageLayoutVersion - } - layout := ocispec.ImageLayout{ - Version: version, - } - - b, err := json.Marshal(layout) - if err != nil { - panic(err) - } - - return tarRecord{ - Header: &tar.Header{ - Name: ocispec.ImageLayoutFile, - Mode: 0444, - Size: int64(len(b)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(b) - return int64(n), err - }, - } - -} - -func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord { - index := ocispec.Index{ - Versioned: ocispecs.Versioned{ - SchemaVersion: 2, - }, - Manifests: manifests, - } - - b, err := json.Marshal(index) - if err != nil { - panic(err) - } - - return tarRecord{ - Header: &tar.Header{ - Name: "index.json", - Mode: 0644, - Size: int64(len(b)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(b) - return int64(n), err - }, - } -} - -func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error { - sort.Slice(records, func(i, j int) bool { - return records[i].Header.Name < records[j].Header.Name - }) - - for _, record := range records { - if err := tw.WriteHeader(record.Header); err != nil { - return err - } - if record.CopyTo != nil { - n, err := record.CopyTo(ctx, tw) - if err != nil { - return err - } - if n != record.Header.Size { - return errors.Errorf("unexpected copy size for %s", record.Header.Name) - } - } else if record.Header.Size > 0 { - return 
errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) - } - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go index 9825f31672083..8dc61218c0485 100644 --- a/vendor/github.com/containerd/containerd/import.go +++ b/vendor/github.com/containerd/containerd/import.go @@ -35,6 +35,7 @@ type importOpts struct { imageRefT func(string) string dgstRefT func(digest.Digest) string allPlatforms bool + compress bool } // ImportOpt allows the caller to specify import specific options @@ -74,6 +75,15 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt { } } +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(c *importOpts) error { + c.compress = true + return nil + } +} + // Import imports an image from a Tar stream using reader. // Caller needs to specify importer. Future version may use oci.v1 as the default. // Note that unreferrenced blobs may be imported to the content store as well. @@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } defer done(ctx) - index, err := archive.ImportIndex(ctx, c.ContentStore(), reader) + var aio []archive.ImportOpt + if iopts.compress { + aio = append(aio, archive.WithImportCompression()) + } + + index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...) 
if err != nil { return nil, err } @@ -130,16 +145,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } for _, m := range idx.Manifests { - if ref := m.Annotations[ocispec.AnnotationRefName]; ref != "" { - if iopts.imageRefT != nil { - ref = iopts.imageRefT(ref) - } - if ref != "" { - imgs = append(imgs, images.Image{ - Name: ref, - Target: m, - }) - } + name := imageName(m.Annotations, iopts.imageRefT) + if name != "" { + imgs = append(imgs, images.Image{ + Name: name, + Target: m, + }) } if iopts.dgstRefT != nil { ref := iopts.dgstRefT(m.Digest) @@ -178,3 +189,17 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt return imgs, nil } + +func imageName(annotations map[string]string, ociCleanup func(string) string) string { + name := annotations[images.AnnotationImageName] + if name != "" { + return name + } + name = annotations[ocispec.AnnotationRefName] + if name != "" { + if ociCleanup != nil { + name = ociCleanup(name) + } + } + return name +} diff --git a/vendor/github.com/containerd/containerd/leases/lease.go b/vendor/github.com/containerd/containerd/leases/lease.go index 909b4ea0bb184..058d065594f35 100644 --- a/vendor/github.com/containerd/containerd/leases/lease.go +++ b/vendor/github.com/containerd/containerd/leases/lease.go @@ -32,6 +32,9 @@ type Manager interface { Create(context.Context, ...Opt) (Lease, error) Delete(context.Context, Lease, ...DeleteOpt) error List(context.Context, ...string) ([]Lease, error) + AddResource(context.Context, Lease, Resource) error + DeleteResource(context.Context, Lease, Resource) error + ListResources(context.Context, Lease) ([]Resource, error) } // Lease retains resources to prevent cleanup before @@ -42,6 +45,13 @@ type Lease struct { Labels map[string]string } +// Resource represents low level resource of image, like content, ingest and +// snapshotter. 
+type Resource struct { + ID string + Type string +} + // DeleteOptions provide options on image delete type DeleteOptions struct { Synchronous bool diff --git a/vendor/github.com/containerd/containerd/leases/proxy/manager.go b/vendor/github.com/containerd/containerd/leases/proxy/manager.go index 30afe5368e7ae..96cd5e653ba47 100644 --- a/vendor/github.com/containerd/containerd/leases/proxy/manager.go +++ b/vendor/github.com/containerd/containerd/leases/proxy/manager.go @@ -91,3 +91,43 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L return l, nil } + +func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + _, err := pm.client.AddResource(ctx, &leasesapi.AddResourceRequest{ + ID: lease.ID, + Resource: leasesapi.Resource{ + ID: r.ID, + Type: r.Type, + }, + }) + return errdefs.FromGRPC(err) +} + +func (pm *proxyManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + _, err := pm.client.DeleteResource(ctx, &leasesapi.DeleteResourceRequest{ + ID: lease.ID, + Resource: leasesapi.Resource{ + ID: r.ID, + Type: r.Type, + }, + }) + return errdefs.FromGRPC(err) +} + +func (pm *proxyManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + resp, err := pm.client.ListResources(ctx, &leasesapi.ListResourcesRequest{ + ID: lease.ID, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + rs := make([]leases.Resource, 0, len(resp.Resources)) + for _, i := range resp.Resources { + rs = append(rs, leases.Resource{ + ID: i.ID, + Type: i.Type, + }) + } + return rs, nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index 00f310f7b4502..4a07a256b54ef 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -567,6 +567,8 @@ func (nw 
*namespacedWriter) createAndCopy(ctx context.Context, desc ocispec.Desc } func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + ctx = namespaces.WithNamespace(ctx, nw.namespace) + nw.l.RLock() defer nw.l.RUnlock() @@ -635,11 +637,11 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size) } size = status.Offset - actual = nw.w.Digest() if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } + actual = nw.w.Digest() } bkt, err := createBlobBucket(tx, nw.namespace, actual) diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go index 6afaa17729181..afe16c9222d77 100644 --- a/vendor/github.com/containerd/containerd/metadata/gc.go +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -46,11 +46,17 @@ const ( ResourceIngest ) +const ( + resourceContentFlat = ResourceContent | 0x20 + resourceSnapshotFlat = ResourceSnapshot | 0x20 +) + var ( labelGCRoot = []byte("containerd.io/gc.root") labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") labelGCContentRef = []byte("containerd.io/gc.ref.content") labelGCExpire = []byte("containerd.io/gc.expire") + labelGCFlat = []byte("containerd.io/gc.flat") ) func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { @@ -90,6 +96,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return nil } libkt := lbkt.Bucket(k) + var flat bool if lblbkt := libkt.Bucket(bucketKeyObjectLabels); lblbkt != nil { if expV := lblbkt.Get(labelGCExpire); expV != nil { @@ -102,6 +109,10 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return nil } } + + if flatV := lblbkt.Get(labelGCFlat); flatV != nil { + flat = true + } } 
fn(gcnode(ResourceLease, ns, string(k))) @@ -111,16 +122,26 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { // no need to allow the lookup to be recursive, handling here // therefore reduces the number of database seeks. + ctype := ResourceContent + if flat { + ctype = resourceContentFlat + } + cbkt := libkt.Bucket(bucketKeyObjectContent) if cbkt != nil { if err := cbkt.ForEach(func(k, v []byte) error { - fn(gcnode(ResourceContent, ns, string(k))) + fn(gcnode(ctype, ns, string(k))) return nil }); err != nil { return err } } + stype := ResourceSnapshot + if flat { + stype = resourceSnapshotFlat + } + sbkt := libkt.Bucket(bucketKeyObjectSnapshots) if sbkt != nil { if err := sbkt.ForEach(func(sk, sv []byte) error { @@ -130,7 +151,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { snbkt := sbkt.Bucket(sk) return snbkt.ForEach(func(k, v []byte) error { - fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k))) + fn(gcnode(stype, ns, fmt.Sprintf("%s/%s", sk, k))) return nil }) }); err != nil { @@ -257,7 +278,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { } func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { - if node.Type == ResourceContent { + switch node.Type { + case ResourceContent: bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) if bkt == nil { // Node may be created from dead edge @@ -265,7 +287,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } return sendLabelRefs(node.Namespace, bkt, fn) - } else if node.Type == ResourceSnapshot { + case ResourceSnapshot, resourceSnapshotFlat: parts := strings.SplitN(node.Key, "/", 2) if len(parts) != 2 { return errors.Errorf("invalid snapshot gc key %s", node.Key) @@ -280,11 +302,16 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } if pv := 
bkt.Get(bucketKeyParent); len(pv) > 0 { - fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + fn(gcnode(node.Type, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + } + + // Do not send labeled references for flat snapshot refs + if node.Type == resourceSnapshotFlat { + return nil } return sendLabelRefs(node.Namespace, bkt, fn) - } else if node.Type == ResourceIngest { + case ResourceIngest: // Send expected value bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key)) if bkt == nil { diff --git a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go index a3c1701d8addf..3050d78bf4b4f 100644 --- a/vendor/github.com/containerd/containerd/metadata/leases.go +++ b/vendor/github.com/containerd/containerd/metadata/leases.go @@ -18,6 +18,8 @@ package metadata import ( "context" + "fmt" + "strings" "time" "github.com/containerd/containerd/errdefs" @@ -167,6 +169,128 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, return ll, nil } +// AddResource references the resource by the provided lease. +func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + keys, ref, err := parseLeaseResource(r) + if err != nil { + return err + } + + bkt := topbkt + for _, key := range keys { + bkt, err = bkt.CreateBucketIfNotExists([]byte(key)) + if err != nil { + return err + } + } + return bkt.Put([]byte(ref), nil) +} + +// DeleteResource dereferences the resource by the provided lease. 
+func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + keys, ref, err := parseLeaseResource(r) + if err != nil { + return err + } + + bkt := topbkt + for _, key := range keys { + if bkt == nil { + break + } + bkt = bkt.Bucket([]byte(key)) + } + + if bkt == nil { + return nil + } + return bkt.Delete([]byte(ref)) +} + +// ListResources lists all the resources referenced by the lease. +func (lm *LeaseManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + rs := make([]leases.Resource, 0) + + // content resources + if cbkt := topbkt.Bucket(bucketKeyObjectContent); cbkt != nil { + if err := cbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: string(bucketKeyObjectContent), + }) + + return nil + }); err != nil { + return nil, err + } + } + + // ingest resources + if lbkt := topbkt.Bucket(bucketKeyObjectIngests); lbkt != nil { + if err := lbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: string(bucketKeyObjectIngests), + }) + + return nil + }); err != nil { + return nil, err + } + } + + // snapshot resources + if sbkt := topbkt.Bucket(bucketKeyObjectSnapshots); sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + + snbkt := sbkt.Bucket(sk) + return 
snbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: fmt.Sprintf("%s/%s", bucketKeyObjectSnapshots, sk), + }) + return nil + }) + }); err != nil { + return nil, err + } + } + return rs, nil +} + func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error { lid, ok := leases.FromContext(ctx) if !ok { @@ -307,3 +431,36 @@ func removeIngestLease(ctx context.Context, tx *bolt.Tx, ref string) error { return bkt.Delete([]byte(ref)) } + +func parseLeaseResource(r leases.Resource) ([]string, string, error) { + var ( + ref = r.ID + typ = r.Type + keys = strings.Split(typ, "/") + ) + + switch k := keys[0]; k { + case string(bucketKeyObjectContent), + string(bucketKeyObjectIngests): + + if len(keys) != 1 { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid resource type %s", typ) + } + + if k == string(bucketKeyObjectContent) { + dgst, err := digest.Parse(ref) + if err != nil { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid content resource id %s: %v", ref, err) + } + ref = dgst.String() + } + case string(bucketKeyObjectSnapshots): + if len(keys) != 2 { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid snapshot resource type %s", typ) + } + default: + return nil, "", errors.Wrapf(errdefs.ErrNotImplemented, "resource type %s not supported yet", typ) + } + + return keys, ref, nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/namespaces.go b/vendor/github.com/containerd/containerd/metadata/namespaces.go index 74951eb5c5b62..25d0e1578b2f0 100644 --- a/vendor/github.com/containerd/containerd/metadata/namespaces.go +++ b/vendor/github.com/containerd/containerd/metadata/namespaces.go @@ -129,7 +129,15 @@ func (s *namespaceStore) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (s *namespaceStore) Delete(ctx context.Context, namespace string) error { +func (s *namespaceStore) Delete(ctx context.Context, 
namespace string, opts ...namespaces.DeleteOpts) error { + i := &namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, i); err != nil { + return err + } + } bkt := getBucket(s.tx, bucketKeyVersion) if empty, err := s.namespaceEmpty(ctx, namespace); err != nil { return err diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 54209171989de..23976636f8563 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -34,6 +34,10 @@ import ( bolt "go.etcd.io/bbolt" ) +const ( + inheritedLabelsPrefix = "containerd.io/snapshot/" +) + type snapshotter struct { snapshots.Snapshotter name string @@ -209,6 +213,15 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath bkey = string(sbkt.Get(bucketKeyName)) local.Parent = string(sbkt.Get(bucketKeyParent)) + inner := snapshots.Info{ + Name: bkey, + Labels: filterInheritedLabels(local.Labels), + } + + if _, err := s.Snapshotter.Update(ctx, inner, fieldpaths...); err != nil { + return err + } + return nil }); err != nil { return snapshots.Info{}, err @@ -338,12 +351,14 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return err } + inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels)) + // TODO: Consider doing this outside of transaction to lessen // metadata lock time if readonly { - m, err = s.Snapshotter.View(ctx, bkey, bparent) + m, err = s.Snapshotter.View(ctx, bkey, bparent, inheritedOpt) } else { - m, err = s.Snapshotter.Prepare(ctx, bkey, bparent) + m, err = s.Snapshotter.Prepare(ctx, bkey, bparent, inheritedOpt) } return err }); err != nil { @@ -445,9 +460,11 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap return err } + inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels)) + // 
TODO: Consider doing this outside of transaction to lessen // metadata lock time - return s.Snapshotter.Commit(ctx, nameKey, bkey) + return s.Snapshotter.Commit(ctx, nameKey, bkey, inheritedOpt) }) } @@ -761,3 +778,19 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error { func (s *snapshotter) Close() error { return s.Snapshotter.Close() } + +// filterInheritedLabels filters the provided labels by removing any key which doesn't have +// a prefix of "containerd.io/snapshot/". +func filterInheritedLabels(labels map[string]string) map[string]string { + if labels == nil { + return nil + } + + filtered := make(map[string]string) + for k, v := range labels { + if strings.HasPrefix(k, inheritedLabelsPrefix) { + filtered[k] = v + } + } + return filtered +} diff --git a/vendor/github.com/containerd/containerd/namespaces.go b/vendor/github.com/containerd/containerd/namespaces.go index eea70ca33a3e1..4c66406b084ea 100644 --- a/vendor/github.com/containerd/containerd/namespaces.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -100,10 +100,18 @@ func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error { - var req api.DeleteNamespaceRequest - - req.Name = namespace +func (r *remoteNamespaces) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error { + i := namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, &i); err != nil { + return err + } + } + req := api.DeleteNamespaceRequest{ + Name: namespace, + } _, err := r.client.Delete(ctx, &req) if err != nil { return errdefs.FromGRPC(err) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index cc5621a68fb15..b4e988e7b723d 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ 
b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -36,10 +36,9 @@ type namespaceKey struct{} // WithNamespace sets a given namespace on the context func WithNamespace(ctx context.Context, namespace string) context.Context { ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace - - // also store on the grpc headers so it gets picked up by any clients that + // also store on the grpc and ttrpc headers so it gets picked up by any clients that // are using this. - return withGRPCNamespaceHeader(ctx, namespace) + return withTTRPCNamespaceHeader(withGRPCNamespaceHeader(ctx, namespace), namespace) } // NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or @@ -58,9 +57,10 @@ func NamespaceFromEnv(ctx context.Context) context.Context { func Namespace(ctx context.Context) (string, bool) { namespace, ok := ctx.Value(namespaceKey{}).(string) if !ok { - return fromGRPCHeader(ctx) + if namespace, ok = fromGRPCHeader(ctx); !ok { + return fromTTRPCHeader(ctx) + } } - return namespace, ok } @@ -70,10 +70,8 @@ func NamespaceRequired(ctx context.Context) (string, error) { if !ok || namespace == "" { return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") } - if err := Validate(namespace); err != nil { return "", errors.Wrap(err, "namespace validation") } - return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 0b5c985691977..5936772cb4cb2 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -33,5 +33,14 @@ type Store interface { List(ctx context.Context) ([]string, error) // Delete removes the namespace. The namespace must be empty to be deleted. 
- Delete(ctx context.Context, namespace string) error + Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error } + +// DeleteInfo specifies information for the deletion of a namespace +type DeleteInfo struct { + // Name of the namespace + Name string +} + +// DeleteOpts allows the caller to set options for namespace deletion +type DeleteOpts func(context.Context, *DeleteInfo) error diff --git a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go new file mode 100644 index 0000000000000..bcd2643cf5ea2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + + "github.com/containerd/ttrpc" +) + +const ( + // TTRPCHeader defines the header name for specifying a containerd namespace + TTRPCHeader = "containerd-namespace-ttrpc" +) + +func copyMetadata(src ttrpc.MD) ttrpc.MD { + md := ttrpc.MD{} + for k, v := range src { + md[k] = append(md[k], v...) 
+ } + return md +} + +func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { + md, ok := ttrpc.GetMetadata(ctx) + if !ok { + md = ttrpc.MD{} + } else { + md = copyMetadata(md) + } + md.Set(TTRPCHeader, namespace) + return ttrpc.WithMetadata(ctx, md) +} + +func fromTTRPCHeader(ctx context.Context) (string, bool) { + return ttrpc.GetMetadataValue(ctx, TTRPCHeader) +} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go b/vendor/github.com/containerd/containerd/namespaces_opts_linux.go similarity index 54% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go rename to vendor/github.com/containerd/containerd/namespaces_opts_linux.go index 53252ec604f5f..6b8cc8f855c48 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go +++ b/vendor/github.com/containerd/containerd/namespaces_opts_linux.go @@ -1,5 +1,3 @@ -// +build !windows - /* Copyright The containerd Authors. @@ -16,27 +14,23 @@ limitations under the License. 
*/ -package proc +package containerd import ( - "github.com/pkg/errors" -) + "context" -// RuncRoot is the path to the root runc state directory -const RuncRoot = "/run/containerd/runc" + "github.com/containerd/cgroups" + "github.com/containerd/containerd/namespaces" +) -func stateName(v interface{}) string { - switch v.(type) { - case *runningState, *execRunningState: - return "running" - case *createdState, *execCreatedState, *createdCheckpointState: - return "created" - case *pausedState: - return "paused" - case *deletedState: - return "deleted" - case *stoppedState: - return "stopped" +// WithNamespaceCgroupDeletion removes the cgroup directory that was created for the namespace +func WithNamespaceCgroupDeletion(ctx context.Context, i *namespaces.DeleteInfo) error { + cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(i.Name)) + if err != nil { + if err == cgroups.ErrCgroupDeleted { + return nil + } + return err } - panic(errors.Errorf("invalid state %v", v)) + return cg.Delete() } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index ce756108adbce..8fe3247b5430d 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -76,6 +76,20 @@ func setLinux(s *Spec) { } } +// nolint +func setResources(s *Spec) { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + } +} + // setCapabilities sets Linux Capabilities to empty if unset func setCapabilities(s *Spec) { setProcess(s) @@ -1139,3 +1153,39 @@ func WithAnnotations(annotations map[string]string) SpecOpts { return nil } } + +// WithLinuxDevices adds the provided linux devices to the spec +func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { + return func(_ context.Context, _ 
Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.Devices = append(s.Linux.Devices, devices...) + return nil + } +} + +var ErrNotADevice = errors.New("not a device node") + +// WithLinuxDevice adds the device specified by path to the spec +func WithLinuxDevice(path, permissions string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + setResources(s) + + dev, err := deviceFromPath(path, permissions) + if err != nil { + return err + } + + s.Linux.Devices = append(s.Linux.Devices, *dev) + + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{ + Type: dev.Type, + Allow: true, + Major: &dev.Major, + Minor: &dev.Minor, + Access: permissions, + }) + + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go new file mode 100644 index 0000000000000..918c8f4ec3c53 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go @@ -0,0 +1,64 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + // The type is 32bit on mips. + devNumber = uint64(stat.Rdev) // nolint: unconvert + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go new file mode 100644 index 0000000000000..3f63dfd162bb5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -0,0 +1,63 @@ +// +build !linux,!windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = uint64(stat.Rdev) + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index fbe1cb33c28b0..d265d544deff1 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -23,6 +23,7 @@ import ( "github.com/containerd/containerd/containers" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" ) // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the @@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { return nil } } + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + return nil, errors.New("device from path not supported on Windows") +} diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go index 766d34493445c..aa604baab9217 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go @@ -17,6 +17,7 @@ package dialer 
import ( + "context" "net" "time" @@ -28,8 +29,19 @@ type dialResult struct { err error } +// ContextDialer returns a GRPC net.Conn connected to the provided address +func ContextDialer(ctx context.Context, address string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return timeoutDialer(address, time.Until(deadline)) + } + return timeoutDialer(address, 0) +} + // Dialer returns a GRPC net.Conn connected to the provided address -func Dialer(address string, timeout time.Duration) (net.Conn, error) { +// Deprecated: use ContextDialer and grpc.WithContextDialer. +var Dialer = timeoutDialer + +func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) { var ( stopC = make(chan struct{}) synC = make(chan *dialResult) diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go b/vendor/github.com/containerd/containerd/pkg/process/deleted_state.go similarity index 95% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go rename to vendor/github.com/containerd/containerd/pkg/process/deleted_state.go index fe9d7bf554111..95ad138e062d1 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/deleted_state.go @@ -16,14 +16,13 @@ limitations under the License. 
*/ -package proc +package process import ( "context" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/runtime/proc" google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" ) @@ -67,6 +66,6 @@ func (s *deletedState) SetExited(status int) { // no op } -func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a deleted state") } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go b/vendor/github.com/containerd/containerd/pkg/process/exec.go similarity index 91% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go rename to vendor/github.com/containerd/containerd/pkg/process/exec.go index 5ab232ae7df51..4175dcd5a4ee3 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go +++ b/vendor/github.com/containerd/containerd/pkg/process/exec.go @@ -16,7 +16,7 @@ limitations under the License. 
*/ -package proc +package process import ( "context" @@ -31,7 +31,8 @@ import ( "golang.org/x/sys/unix" "github.com/containerd/console" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -49,10 +50,10 @@ type execProcess struct { io *processIO status int exited time.Time - pid *safePid + pid safePid closers []io.Closer stdin io.Closer - stdio proc.Stdio + stdio stdio.Stdio path string spec specs.Process @@ -95,6 +96,7 @@ func (e *execProcess) setExited(status int) { e.status = status e.exited = time.Now() e.parent.Platform.ShutdownConsole(context.Background(), e.console) + e.pid.set(StoppedPID) close(e.waitBlock) } @@ -106,7 +108,7 @@ func (e *execProcess) Delete(ctx context.Context) error { } func (e *execProcess) delete(ctx context.Context) error { - e.wg.Wait() + waitTimeout(ctx, &e.wg, 2*time.Second) if e.io != nil { for _, c := range e.closers { c.Close() @@ -142,7 +144,12 @@ func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error { func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { pid := e.pid.get() - if pid != 0 { + switch { + case pid == 0: + return errors.Wrap(errdefs.ErrFailedPrecondition, "process not created") + case pid < 0: + return errors.Wrapf(errdefs.ErrNotFound, "process already finished") + default: if err := unix.Kill(pid, syscall.Signal(sig)); err != nil { return errors.Wrapf(checkKillError(err), "exec kill error") } @@ -154,7 +161,7 @@ func (e *execProcess) Stdin() io.Closer { return e.stdin } -func (e *execProcess) Stdio() proc.Stdio { +func (e *execProcess) Stdio() stdio.Stdio { return e.stdio } @@ -254,10 +261,13 @@ func (e *execProcess) Status(ctx context.Context) (string, error) { } e.mu.Lock() defer e.mu.Unlock() - // if we don't have a pid then the exec process has just been 
created + // if we don't have a pid(pid=0) then the exec process has just been created if e.pid.get() == 0 { return "created", nil } + if e.pid.get() == StoppedPID { + return "stopped", nil + } // if we have a pid and it can be signaled, the process is running if err := unix.Kill(e.pid.get(), 0); err == nil { return "running", nil diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go b/vendor/github.com/containerd/containerd/pkg/process/exec_state.go similarity index 99% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go rename to vendor/github.com/containerd/containerd/pkg/process/exec_state.go index 12489501ba880..a8b44bb8bcddd 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/exec_state.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go b/vendor/github.com/containerd/containerd/pkg/process/init.go similarity index 95% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go rename to vendor/github.com/containerd/containerd/pkg/process/init.go index 10787ed878296..7861bdd8b6863 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go +++ b/vendor/github.com/containerd/containerd/pkg/process/init.go @@ -16,7 +16,7 @@ limitations under the License. 
*/ -package proc +package process import ( "context" @@ -33,7 +33,7 @@ import ( "github.com/containerd/console" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" @@ -59,15 +59,15 @@ type Init struct { id string Bundle string console console.Console - Platform proc.Platform + Platform stdio.Platform io *processIO runtime *runc.Runc status int exited time.Time - pid int + pid safePid closers []io.Closer stdin io.Closer - stdio proc.Stdio + stdio stdio.Stdio Rootfs string IoUID int IoGID int @@ -93,7 +93,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru } // New returns a new process -func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init { +func New(id string, runtime *runc.Runc, stdio stdio.Stdio) *Init { p := &Init{ id: id, runtime: runtime, @@ -113,6 +113,9 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { pio *processIO pidFile = newPidFile(p.Bundle) ) + p.pid.Lock() + defer p.pid.Unlock() + if r.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { return errors.Wrap(err, "failed to create OCI runtime console socket") @@ -167,7 +170,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { if err != nil { return errors.Wrap(err, "failed to retrieve OCI runtime container pid") } - p.pid = pid + p.pid.pid = pid return nil } @@ -213,7 +216,7 @@ func (p *Init) ID() string { // Pid of the process func (p *Init) Pid() int { - return p.pid + return p.pid.get() } // ExitStatus of the process @@ -272,6 +275,7 @@ func (p *Init) setExited(status int) { p.exited = time.Now() p.status = status p.Platform.ShutdownConsole(context.Background(), p.console) + p.pid.set(StoppedPID) close(p.waitBlock) } @@ -284,7 +288,7 @@ func (p *Init) Delete(ctx 
context.Context) error { } func (p *Init) delete(ctx context.Context) error { - p.wg.Wait() + waitTimeout(ctx, &p.wg, 2*time.Second) err := p.runtime.Delete(ctx, p.id, nil) // ignore errors if a runtime has already deleted the process // but we still hold metadata and pipes @@ -324,13 +328,6 @@ func (p *Init) Resize(ws console.WinSize) error { return p.console.Resize(ws) } -func (p *Init) resize(ws console.WinSize) error { - if p.console == nil { - return nil - } - return p.console.Resize(ws) -} - // Pause the init process and all its child processes func (p *Init) Pause(ctx context.Context) error { p.mu.Lock() @@ -384,7 +381,7 @@ func (p *Init) Runtime() *runc.Runc { } // Exec returns a new child process -func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { p.mu.Lock() defer p.mu.Unlock() @@ -392,7 +389,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce } // exec returns a new exec'd process -func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { // process exec request var spec specs.Process if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { @@ -405,14 +402,13 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce path: path, parent: p, spec: spec, - stdio: proc.Stdio{ + stdio: stdio.Stdio{ Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, Terminal: r.Terminal, }, waitBlock: make(chan struct{}), - pid: &safePid{}, } e.execState = &execCreatedState{p: e} return e, nil @@ -472,7 +468,7 @@ func (p *Init) update(ctx context.Context, r *google_protobuf.Any) error { } // Stdio of the process -func (p *Init) Stdio() proc.Stdio { +func (p *Init) Stdio() stdio.Stdio { return p.stdio } @@ -492,7 +488,7 @@ func (p *Init) runtimeError(rErr error, msg 
string) error { } } -func withConditionalIO(c proc.Stdio) runc.IOOpt { +func withConditionalIO(c stdio.Stdio) runc.IOOpt { return func(o *runc.IOOption) { o.OpenStdin = c.Stdin != "" o.OpenStdout = c.Stdout != "" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go b/vendor/github.com/containerd/containerd/pkg/process/init_state.go similarity index 92% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go rename to vendor/github.com/containerd/containerd/pkg/process/init_state.go index 51849c62b4f62..9ec1d17be0e48 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/init_state.go @@ -16,13 +16,11 @@ limitations under the License. */ -package proc +package process import ( "context" - "github.com/containerd/console" - "github.com/containerd/containerd/runtime/proc" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" @@ -30,14 +28,13 @@ import ( ) type initState interface { - Resize(console.WinSize) error Start(context.Context) error Delete(context.Context) error Pause(context.Context) error Resume(context.Context) error Update(context.Context, *google_protobuf.Any) error Checkpoint(context.Context, *CheckpointConfig) error - Exec(context.Context, string, *ExecConfig) (proc.Process, error) + Exec(context.Context, string, *ExecConfig) (Process, error) Kill(context.Context, uint32, bool) error SetExited(int) } @@ -76,10 +73,6 @@ func (s *createdState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro return errors.Errorf("cannot checkpoint a task in created state") } -func (s *createdState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *createdState) Start(ctx context.Context) error { if err := s.p.start(ctx); err != nil { return err @@ -106,7 +99,7 @@ func (s *createdState) SetExited(status int) { } } -func (s 
*createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return s.p.exec(ctx, path, r) } @@ -145,14 +138,13 @@ func (s *createdCheckpointState) Checkpoint(ctx context.Context, r *CheckpointCo return errors.Errorf("cannot checkpoint a task in created state") } -func (s *createdCheckpointState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *createdCheckpointState) Start(ctx context.Context) error { p := s.p sio := p.stdio + p.pid.Lock() + defer p.pid.Unlock() + var ( err error socket *runc.Socket @@ -192,7 +184,7 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { if err != nil { return errors.Wrap(err, "failed to retrieve OCI runtime container pid") } - p.pid = pid + p.pid.pid = pid return s.transition("running") } @@ -215,7 +207,7 @@ func (s *createdCheckpointState) SetExited(status int) { } } -func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a created state") } @@ -255,10 +247,6 @@ func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro return s.p.checkpoint(ctx, r) } -func (s *runningState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *runningState) Start(ctx context.Context) error { return errors.Errorf("cannot start a running process") } @@ -279,7 +267,7 @@ func (s *runningState) SetExited(status int) { } } -func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return s.p.exec(ctx, path, r) } @@ -319,10 +307,6 @@ func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) 
error return s.p.checkpoint(ctx, r) } -func (s *pausedState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - func (s *pausedState) Start(ctx context.Context) error { return errors.Errorf("cannot start a paused process") } @@ -347,7 +331,7 @@ func (s *pausedState) SetExited(status int) { } } -func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a paused state") } @@ -381,10 +365,6 @@ func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro return errors.Errorf("cannot checkpoint a stopped container") } -func (s *stoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") -} - func (s *stoppedState) Start(ctx context.Context) error { return errors.Errorf("cannot start a stopped process") } @@ -404,6 +384,6 @@ func (s *stoppedState) SetExited(status int) { // no op } -func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a stopped state") } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go b/vendor/github.com/containerd/containerd/pkg/process/io.go similarity index 89% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go rename to vendor/github.com/containerd/containerd/pkg/process/io.go index 0096db7169fd7..169f6c8e2072f 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go +++ b/vendor/github.com/containerd/containerd/pkg/process/io.go @@ -16,7 +16,7 @@ limitations under the License. 
*/ -package proc +package process import ( "context" @@ -32,7 +32,7 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" "github.com/pkg/errors" @@ -50,7 +50,7 @@ type processIO struct { uri *url.URL copy bool - stdio proc.Stdio + stdio stdio.Stdio } func (p *processIO) Close() error { @@ -76,7 +76,7 @@ func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error { return nil } -func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) { +func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdio) (*processIO, error) { pio := &processIO{ stdio: stdio, } @@ -101,17 +101,20 @@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) case "binary": - pio.io, err = newBinaryIO(ctx, id, u) + pio.io, err = NewBinaryIO(ctx, id, u) case "file": - if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil { + filePath := u.Path + if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { return nil, err } var f *os.File - f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + f, err = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return nil, err } f.Close() + pio.stdio.Stdout = filePath + pio.stdio.Stderr = filePath pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) default: @@ -179,10 +182,10 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w ) if ok { if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening w/o 
fifo %q failed", i.name) } if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening r/o fifo %q failed", i.name) } } else { if sameFile != nil { @@ -191,7 +194,7 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w continue } if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening file %q failed", i.name) } if stdout == stderr { sameFile = &countingWriteCloser{ @@ -251,7 +254,8 @@ func isFifo(path string) (bool, error) { return false, nil } -func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) { +// NewBinaryIO runs a custom binary process for pluggable shim logging +func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -264,7 +268,7 @@ func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) } } ctx, cancel := context.WithCancel(ctx) - cmd := exec.CommandContext(ctx, uri.Host, args...) + cmd := exec.CommandContext(ctx, uri.Path, args...) cmd.Env = append(cmd.Env, "CONTAINER_ID="+id, "CONTAINER_NAMESPACE="+ns, diff --git a/vendor/github.com/containerd/containerd/runtime/proc/proc.go b/vendor/github.com/containerd/containerd/pkg/process/process.go similarity index 70% rename from vendor/github.com/containerd/containerd/runtime/proc/proc.go rename to vendor/github.com/containerd/containerd/pkg/process/process.go index 0e8d21b749367..7cebb9b309072 100644 --- a/vendor/github.com/containerd/containerd/runtime/proc/proc.go +++ b/vendor/github.com/containerd/containerd/pkg/process/process.go @@ -14,30 +14,17 @@ limitations under the License. 
*/ -package proc +package process import ( "context" "io" - "sync" "time" "github.com/containerd/console" + "github.com/containerd/containerd/pkg/stdio" ) -// Stdio of a process -type Stdio struct { - Stdin string - Stdout string - Stderr string - Terminal bool -} - -// IsNull returns true if the stdio is not defined -func (s Stdio) IsNull() bool { - return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" -} - // Process on a system type Process interface { // ID returns the id for the process @@ -51,7 +38,7 @@ type Process interface { // Stdin returns the process STDIN Stdin() io.Closer // Stdio returns io information for the container - Stdio() Stdio + Stdio() stdio.Stdio // Status returns the process status Status(context.Context) (string, error) // Wait blocks until the process has exited @@ -67,12 +54,3 @@ type Process interface { // SetExited sets the exit status for the process SetExited(status int) } - -// Platform handles platform-specific behavior that may differs across -// platform implementations -type Platform interface { - CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, - wg *sync.WaitGroup) (console.Console, error) - ShutdownConsole(ctx context.Context, console console.Console) error - Close() error -} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go b/vendor/github.com/containerd/containerd/pkg/process/types.go similarity index 99% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go rename to vendor/github.com/containerd/containerd/pkg/process/types.go index 5d705c030f26d..03477038ab97f 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go +++ b/vendor/github.com/containerd/containerd/pkg/process/types.go @@ -14,7 +14,7 @@ limitations under the License. 
*/ -package proc +package process import ( google_protobuf "github.com/gogo/protobuf/types" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go b/vendor/github.com/containerd/containerd/pkg/process/utils.go similarity index 70% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go rename to vendor/github.com/containerd/containerd/pkg/process/utils.go index 75927a4ef4017..b0ac6333c8501 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go +++ b/vendor/github.com/containerd/containerd/pkg/process/utils.go @@ -16,9 +16,10 @@ limitations under the License. */ -package proc +package process import ( + "context" "encoding/json" "fmt" "io" @@ -34,6 +35,15 @@ import ( "golang.org/x/sys/unix" ) +const ( + // RuncRoot is the path to the root runc state directory + RuncRoot = "/run/containerd/runc" + // StoppedPID is the pid assigned after a container has run and stopped + StoppedPID = -1 + // InitPidFile name of the file that contains the init pid + InitPidFile = "init.pid" +) + // safePid is a thread safe wrapper for pid. type safePid struct { sync.Mutex @@ -46,6 +56,12 @@ func (s *safePid) get() int { return s.pid } +func (s *safePid) set(pid int) { + s.Lock() + s.pid = pid + s.Unlock() +} + // TODO(mlaventure): move to runc package? 
func getLastRuntimeError(r *runc.Runc) (string, error) { if r.Log == "" { @@ -56,6 +72,7 @@ func getLastRuntimeError(r *runc.Runc) (string, error) { if err != nil { return "", err } + defer f.Close() var ( errMsg string @@ -116,9 +133,6 @@ func checkKillError(err error) error { return errors.Wrapf(err, "unknown error after kill") } -// InitPidFile name of the file that contains the init pid -const InitPidFile = "init.pid" - func newPidFile(bundle string) *pidFile { return &pidFile{ path: filepath.Join(bundle, InitPidFile), @@ -142,3 +156,37 @@ func (p *pidFile) Path() string { func (p *pidFile) Read() (int, error) { return runc.ReadPidFile(p.path) } + +// waitTimeout handles waiting on a waitgroup with a specified timeout. +// this is commonly used for waiting on IO to finish after a process has exited +func waitTimeout(ctx context.Context, wg *sync.WaitGroup, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + done := make(chan struct{}, 1) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func stateName(v interface{}) string { + switch v.(type) { + case *runningState, *execRunningState: + return "running" + case *createdState, *execCreatedState, *createdCheckpointState: + return "created" + case *pausedState: + return "paused" + case *deletedState: + return "deleted" + case *stoppedState: + return "stopped" + } + panic(errors.Errorf("invalid state %v", v)) +} diff --git a/vendor/github.com/containerd/continuity/hardlinks_windows.go b/vendor/github.com/containerd/containerd/pkg/stdio/platform.go similarity index 58% rename from vendor/github.com/containerd/continuity/hardlinks_windows.go rename to vendor/github.com/containerd/containerd/pkg/stdio/platform.go index 5893f4e1ae26c..6e1b27cfad0f0 100644 --- a/vendor/github.com/containerd/continuity/hardlinks_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/stdio/platform.go 
@@ -14,15 +14,20 @@ limitations under the License. */ -package continuity +package stdio -import "os" +import ( + "context" + "sync" -type hardlinkKey struct{} + "github.com/containerd/console" +) -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - // NOTE(stevvooe): Obviously, this is not yet implemented. However, the - // makings of an implementation are available in src/os/types_windows.go. More - // investigation needs to be done to figure out exactly how to do this. - return hardlinkKey{}, errNotAHardLink +// Platform handles platform-specific behavior that may differs across +// platform implementations +type Platform interface { + CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, + wg *sync.WaitGroup) (console.Console, error) + ShutdownConsole(ctx context.Context, console console.Console) error + Close() error } diff --git a/vendor/github.com/containerd/continuity/resource_windows.go b/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go similarity index 68% rename from vendor/github.com/containerd/continuity/resource_windows.go rename to vendor/github.com/containerd/containerd/pkg/stdio/stdio.go index f9801801cfc9b..b02e77dcdd7dd 100644 --- a/vendor/github.com/containerd/continuity/resource_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go @@ -14,15 +14,17 @@ limitations under the License. */ -package continuity +package stdio -import "os" +// Stdio of a process +type Stdio struct { + Stdin string + Stdout string + Stderr string + Terminal bool +} -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. 
-func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - }, nil +// IsNull returns true if the stdio is not defined +func (s Stdio) IsNull() bool { + return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" } diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go index 8259bbc851c48..3ad22a10d0ce1 100644 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -29,11 +29,48 @@ type MatchComparer interface { // Only returns a match comparer for a single platform // using default resolution logic for the platform. // +// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) // For ARMv7, will also match ARMv6 and ARMv5 // For ARMv6, will also match ARMv5 func Only(platform specs.Platform) MatchComparer { platform = Normalize(platform) if platform.Architecture == "arm" { + if platform.Variant == "v8" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v7", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } if platform.Variant == "v7" { return orderedPlatformComparer{ matchers: []Matcher{ diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 
bf6476b6419a4..69b336d67f772 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -97,7 +97,7 @@ func getCPUVariant() string { } switch variant { - case "8": + case "8", "AArch64": variant = "v8" case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": variant = "v7" diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index 9ae8bbeb5ff68..c7d2724140538 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -30,7 +30,8 @@ var ( ErrNoType = errors.New("plugin: no type") // ErrNoPluginID is returned when no id is specified ErrNoPluginID = errors.New("plugin: no id") - + // ErrIDRegistered is returned when a duplicate id is already registered + ErrIDRegistered = errors.New("plugin: id already registered") // ErrSkipPlugin is used when a plugin is not initialized and should not be loaded, // this allows the plugin loader differentiate between a plugin which is configured // not to load and one that fails to load. @@ -100,6 +101,8 @@ type Registration struct { // context are passed in. The init function may modify the registration to // add exports, capabilities and platform support declarations. 
InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool } // Init the registered plugin @@ -157,12 +160,16 @@ func Load(path string) (err error) { func Register(r *Registration) { register.Lock() defer register.Unlock() + if r.Type == "" { panic(ErrNoType) } if r.ID == "" { panic(ErrNoPluginID) } + if err := checkUnique(r); err != nil { + panic(err) + } var last bool for _, requires := range r.Requires { @@ -177,24 +184,36 @@ func Register(r *Registration) { register.r = append(register.r, r) } +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return errors.Wrap(ErrIDRegistered, r.URI()) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + // Graph returns an ordered list of registered plugins for initialization. // Plugins in disableList specified by id will be disabled. -func Graph(disableList []string) (ordered []*Registration) { +func Graph(filter DisableFilter) (ordered []*Registration) { register.RLock() defer register.RUnlock() - for _, d := range disableList { - for i, r := range register.r { - if r.ID == d { - register.r = append(register.r[:i], register.r[i+1:]...) 
- break - } + + for _, r := range register.r { + if filter(r) { + r.Disable = true } } added := map[*Registration]bool{} for _, r := range register.r { - - children(r.ID, r.Requires, added, &ordered) + if r.Disable { + continue + } + children(r, added, &ordered) if !added[r] { ordered = append(ordered, r) added[r] = true @@ -203,11 +222,13 @@ func Graph(disableList []string) (ordered []*Registration) { return ordered } -func children(id string, types []Type, added map[*Registration]bool, ordered *[]*Registration) { - for _, t := range types { +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { for _, r := range register.r { - if r.ID != id && (t == "*" || r.Type == t) { - children(r.ID, r.Requires, added, ordered) + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) if !added[r] { *ordered = append(*ordered, r) added[r] = true diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go index 693dcafe1da21..3a91daba4472a 100644 --- a/vendor/github.com/containerd/containerd/pull.go +++ b/vendor/github.com/containerd/containerd/pull.go @@ -69,7 +69,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) if pullCtx.Unpack { - if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil { + if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) } } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 73adb5a2f16f9..9652d3ac1b3e8 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ 
-40,126 +40,278 @@ type dockerAuthorizer struct { credentials func(string) (string, string, error) client *http.Client + header http.Header mu sync.Mutex - auth map[string]string + // indexed by host name + handlers map[string]*authHandler } // NewAuthorizer creates a Docker authorizer using the provided function to // get credentials for the token server or basic auth. +// Deprecated: Use NewDockerAuthorizer func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - if client == nil { - client = http.DefaultClient + return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. 
+// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient + } + return &dockerAuthorizer{ - credentials: f, - client: client, - auth: map[string]string{}, + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), } } +// Authorize handles auth request. func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { - // TODO: Lookup matching challenge and scope rather than just host - if auth := a.getAuth(req.URL.Host); auth != "" { - req.Header.Set("Authorization", auth) + // skip if there is no auth handler + ah := a.getAuthHandler(req.URL.Host) + if ah == nil { + return nil } + auth, err := ah.authorize(ctx) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) return nil } +func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { + a.mu.Lock() + defer a.mu.Unlock() + + return a.handlers[host] +} + func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { last := responses[len(responses)-1] host := last.Request.URL.Host + + a.mu.Lock() + defer a.mu.Unlock() for _, c := range parseAuthHeader(last.Header) { if c.scheme == bearerAuth { if err := invalidAuthorization(c, responses); err != nil { - // TODO: Clear token - a.setAuth(host, "") + delete(a.handlers, host) return err } - // TODO(dmcg): Store challenge, not token - // Move token fetching to authorize - return a.setTokenAuth(ctx, host, c.parameters) + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service. + // and the resource scope is only different part + // which can be provided by each request. 
+ if _, ok := a.handlers[host]; ok { + return nil + } + + common, err := a.generateTokenOptions(ctx, host, c) + if err != nil { + return err + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + return nil } else if c.scheme == basicAuth && a.credentials != nil { - // TODO: Resolve credentials on authorize username, secret, err := a.credentials(host) if err != nil { return err } + if username != "" && secret != "" { - auth := username + ":" + secret - a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth)))) + common := tokenOptions{ + username: username, + secret: secret, + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) return nil } } } - return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") } -func (a *dockerAuthorizer) getAuth(host string) string { - a.mu.Lock() - defer a.mu.Unlock() - - return a.auth[host] -} - -func (a *dockerAuthorizer) setAuth(host string, auth string) bool { - a.mu.Lock() - defer a.mu.Unlock() - - changed := a.auth[host] != auth - a.auth[host] = auth - - return changed -} - -func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error { - realm, ok := params["realm"] +func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) { + realm, ok := c.parameters["realm"] if !ok { - return errors.New("no realm specified for token auth challenge") + return tokenOptions{}, errors.New("no realm specified for token auth challenge") } realmURL, err := url.Parse(realm) if err != nil { - return errors.Wrap(err, "invalid token auth challenge realm") + return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") } to := tokenOptions{ realm: realmURL.String(), - service: params["service"], + service: c.parameters["service"], } - to.scopes = getTokenScopes(ctx, params) - if len(to.scopes) == 0 { - return 
errors.Errorf("no scope specified for token auth challenge") + scope, ok := c.parameters["scope"] + if !ok { + return tokenOptions{}, errors.Errorf("no scope specified for token auth challenge") } + to.scopes = append(to.scopes, scope) if a.credentials != nil { to.username, to.secret, err = a.credentials(host) if err != nil { - return err + return tokenOptions{}, err } } + return to, nil +} + +// authResult is used to control limit rate. +type authResult struct { + sync.WaitGroup + token string + err error +} + +// authHandler is used to handle auth request per registry server. +type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme authenticationScheme + + // common contains common challenge answer + common tokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, error) { + switch ah.scheme { + case basicAuth: + return ah.doBasicAuth(ctx) + case bearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { + username, secret := ah.common.username, ah.common.secret + + if username == "" || secret == "" { + return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), nil +} - var token string +func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) 
{ + // copy common tokenOptions + to := ah.common + + to.scopes = getTokenScopes(ctx, to.scopes) + if len(to.scopes) == 0 { + return "", errors.Errorf("no scope specified for token auth challenge") + } + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.scopes, " ") + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist { + ah.Unlock() + r.Wait() + return r.token, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + // fetch token for the resource scope + var ( + token string + err error + ) if to.secret != "" { - // Credential information is provided, use oauth POST endpoint - token, err = a.fetchTokenWithOAuth(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch oauth token") - } + // credential information is provided, use oauth POST endpoint + token, err = ah.fetchTokenWithOAuth(ctx, to) + err = errors.Wrap(err, "failed to fetch oauth token") } else { - // Do request anonymously - token, err = a.fetchToken(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch anonymous token") - } + // do request anonymously + token, err = ah.fetchToken(ctx, to) + err = errors.Wrap(err, "failed to fetch anonymous token") } - a.setAuth(host, fmt.Sprintf("Bearer %s", token)) + token = fmt.Sprintf("Bearer %s", token) - return nil + r.token, r.err = token, err + r.Done() + return r.token, r.err } type tokenOptions struct { @@ -178,7 +330,7 @@ type postTokenResponse struct { Scope string `json:"scope"` } -func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { +func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { form := url.Values{} form.Set("scope", strings.Join(to.scopes, " ")) form.Set("service", to.service) @@ -194,11 +346,18 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti form.Set("password", to.secret) 
} - resp, err := ctxhttp.Post( - ctx, a.client, to.realm, - "application/x-www-form-urlencoded; charset=utf-8", - strings.NewReader(form.Encode()), - ) + req, err := http.NewRequest("POST", to.realm, strings.NewReader(form.Encode())) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) + } + } + + resp, err := ctxhttp.Do(ctx, ah.client, req) if err != nil { return "", err } @@ -208,7 +367,7 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti // As of September 2017, GCR is known to return 404. // As of February 2018, JFrog Artifactory is known to return 401. if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { - return a.fetchToken(ctx, to) + return ah.fetchToken(ctx, to) } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB log.G(ctx).WithFields(logrus.Fields{ @@ -237,13 +396,19 @@ type getTokenResponse struct { RefreshToken string `json:"refresh_token"` } -// getToken fetches a token using a GET request -func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) { +// fetchToken fetches a token using a GET request +func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) { req, err := http.NewRequest("GET", to.realm, nil) if err != nil { return "", err } + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) 
+ } + } + reqParams := req.URL.Query() if to.service != "" { @@ -260,7 +425,7 @@ func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (str req.URL.RawQuery = reqParams.Encode() - resp, err := ctxhttp.Do(ctx, a.client, req) + resp, err := ctxhttp.Do(ctx, ah.client, req) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 6f06b0e50c850..ce3da55245e61 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -23,7 +23,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "github.com/containerd/containerd/errdefs" @@ -32,7 +32,6 @@ import ( "github.com/docker/distribution/registry/api/errcode" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type dockerFetcher struct { @@ -40,26 +39,46 @@ type dockerFetcher struct { } func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields( - logrus.Fields{ - "base": r.base.String(), - "digest": desc.Digest, - }, - )) - - urls, err := r.getV2URLPaths(ctx, desc) - if err != nil { - return nil, err + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") } - ctx, err = contextWithRepositoryScope(ctx, r.refspec, false) + ctx, err := contextWithRepositoryScope(ctx, r.refspec, false) if err != nil { return nil, err } return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { - for _, u := range urls { - rc, err := r.open(ctx, u, desc.MediaType, offset) + // firstly try fetch via external urls + for _, us := range desc.URLs { + ctx = 
log.WithLogger(ctx, log.G(ctx).WithField("url", us)) + + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to parse") + continue + } + log.G(ctx).Debug("trying alternative url") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) if err != nil { if errdefs.IsNotFound(err) { continue // try one of the other urls. @@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return rc, nil } + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + } + + // Finally use blobs endpoints + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + return nil, errors.Wrapf(errdefs.ErrNotFound, "could not fetch content descriptor %v (%v) from remote", desc.Digest, desc.MediaType) @@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } -func (r dockerFetcher) open(ctx context.Context, u, 
mediatype string, offset int64) (io.ReadCloser, error) { - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) { + req.header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) if offset > 0 { // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints // will return the header without supporting the range. The content // range must always be checked. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) } - resp, err := r.doRequestWithRetries(ctx, req, nil) + resp, err := req.doWithRetries(ctx, nil) if err != nil { return nil, err } @@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u) + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) } var registryErr errcode.Errors if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) } - return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error()) + return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) } if offset > 0 { cr := resp.Header.Get("content-range") @@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int return resp.Body, nil } - -// getV2URLPaths generates the candidate urls paths for the object based on 
the -// set of hints and the provided object id. URLs are returned in the order of -// most to least likely succeed. -func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) { - var urls []string - - if len(desc.URLs) > 0 { - // handle fetch via external urls. - for _, u := range desc.URLs { - log.G(ctx).WithField("url", u).Debug("adding alternative url") - urls = append(urls, u) - } - } - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - images.MediaTypeDockerSchema1Manifest, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - urls = append(urls, r.url(path.Join("manifests", desc.Digest.String()))) - } - - // always fallback to attempting to get the object out of the blobs store. - urls = append(urls, r.url(path.Join("blobs", desc.Digest.String()))) - - return urls, nil -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go index 1a355783b8953..529cfbc274bd2 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/handler.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -110,3 +110,45 @@ func appendDistributionSourceLabel(originLabel, repo string) string { func distributionSourceLabelKey(source string) string { return fmt.Sprintf("%s.%s", labelDistributionSource, source) } + +// selectRepositoryMountCandidate will select the repo which has longest +// common prefix components as the candidate. 
+func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + // NOTE: basically, it won't be error here + return "" + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := sources[distributionSourceLabelKey(source)] + if !ok || repoLabel == "" { + return "" + } + + n, match := 0, "" + components := strings.Split(target, "/") + for _, repo := range strings.Split(repoLabel, ",") { + // the target repo is not a candidate + if repo == target { + continue + } + + if l := commonPrefixComponents(components, repo); l >= n { + n, match = l, repo + } + } + return match +} + +func commonPrefixComponents(components []string, target string) int { + targetComponents := strings.Split(target, "/") + + i := 0 + for ; i < len(components) && i < len(targetComponents); i++ { + if components[i] != targetComponents[i] { + break + } + } + return i +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index c3c0923f08ac1..600868467abfc 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -21,7 +21,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "time" @@ -37,7 +37,7 @@ import ( type dockerPusher struct { *dockerBase - tag string + object string // TODO: namespace tracker tracker StatusTracker @@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten return nil, errors.Wrap(err, "failed to get status") } + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + } + var ( isManifest bool - existCheck string + existCheck []string + host = hosts[0] ) switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, 
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: isManifest = true - if p.tag == "" { - existCheck = path.Join("manifests", desc.Digest.String()) - } else { - existCheck = path.Join("manifests", p.tag) - } + existCheck = getManifestPath(p.object, desc.Digest) default: - existCheck = path.Join("blobs", desc.Digest.String()) + existCheck = []string{"blobs", desc.Digest.String()} } - req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil) - if err != nil { - return nil, err - } + req := p.request(host, http.MethodHead, existCheck...) + req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - resp, err := p.doRequestWithRetries(ctx, req, nil) + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) if err != nil { if errors.Cause(err) != ErrInvalidAuthorization { return nil, err @@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } else { if resp.StatusCode == http.StatusOK { var exists bool - if isManifest && p.tag != "" { + if isManifest && existCheck[1] != desc.Digest.String() { dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) if dgstHeader == desc.Digest { exists = true @@ -116,67 +117,94 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } } - // TODO: Lookup related objects for cross repository push - if isManifest { - var putPath string - if p.tag != "" { - putPath = path.Join("manifests", p.tag) - } else { - putPath = path.Join("manifests", desc.Digest.String()) - } - - req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil) - if err != nil { - return nil, err - } - req.Header.Add("Content-Type", desc.MediaType) + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) 
+ req.header.Add("Content-Type", desc.MediaType) } else { - // TODO: Do monolithic upload if size is small - // Start upload request - req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil) - if err != nil { - return nil, err + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. + resp, err = preq.do(pctx) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized { + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + } } - resp, err := p.doRequestWithRetries(ctx, req, nil) - if err != nil { - return nil, err + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } } switch resp.StatusCode { case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) default: // TODO: log error return nil, errors.Errorf("unexpected response: %s", resp.Status) } - location := resp.Header.Get("Location") + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) // Support paths without host in location if strings.HasPrefix(location, "/") { - // Support location string containing path and query - qmIndex := strings.Index(location, "?") - if qmIndex > 0 { - u := p.base - u.Path = location[:qmIndex] - 
u.RawQuery = location[qmIndex+1:] - location = u.String() - } else { - u := p.base - u.Path = location - location = u.String() + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) } - } - req, err = http.NewRequest(http.MethodPut, location, nil) - if err != nil { - return nil, err + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") + + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + } } - q := req.URL.Query() + q := lurl.Query() q.Add("digest", desc.Digest.String()) - req.URL.RawQuery = q.Encode() + req = p.request(lhost, http.MethodPut) + req.path = lurl.Path + "?" 
+ q.Encode() } p.tracker.SetStatus(ref, Status{ Status: content.Status{ @@ -191,13 +219,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten pr, pw := io.Pipe() respC := make(chan *http.Response, 1) + body := ioutil.NopCloser(pr) - req.Body = ioutil.NopCloser(pr) - req.ContentLength = desc.Size + req.body = func() (io.ReadCloser, error) { + if body == nil { + return nil, errors.New("cannot reuse body, request must be retried") + } + // Only use the body once since pipe cannot be seeked + ob := body + body = nil + return ob, nil + } + req.size = desc.Size go func() { defer close(respC) - resp, err = p.doRequest(ctx, req) + resp, err = req.do(ctx) if err != nil { pr.CloseWithError(err) return @@ -223,6 +260,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten }, nil } +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + type pushWriter struct { base *dockerBase ref string @@ -296,7 +352,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di } if size > 0 && size != status.Offset { - return errors.Errorf("unxpected size %d, expected %d", status.Offset, size) + return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) } if expected == "" { @@ -320,3 +376,16 @@ func (pw *pushWriter) Truncate(size int64) error { // TODO: always error on manifest return errors.New("cannot truncate remote upload") } + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" 
+ if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 0000000000000..ae24f41e10511 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). A public +// mirror should never be trusted to do a resolve action. 
+// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. search, catalog, remove) +) + +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. 
+func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. 
+// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. 
+func MatchLocalhost(host string) (bool, error) { + for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} { + if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') { + return true, nil + } + } + return host == "::1", nil + +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 00e1c85568d32..8e65d8cccb266 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -18,9 +18,10 @@ package docker import ( "context" + "fmt" "io" + "io/ioutil" "net/http" - "net/url" "path" "strings" @@ -46,6 +47,19 @@ var ( // ErrInvalidAuthorization is used when credentials are passed to a server but // those credentials are rejected. ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 ) // Authorizer is used to authorize HTTP requests based on 401 HTTP responses. @@ -72,31 +86,38 @@ type Authorizer interface { // ResolverOptions are used to configured a new Docker register resolver type ResolverOptions struct { + // Hosts returns registry host configurations for a namespace. 
+ Hosts RegistryHosts + + // Headers are the HTTP request header fields sent by the resolver + Headers http.Header + + // Tracker is used to track uploads to the registry. This is used + // since the registry does not have upload tracking and the existing + // mechanism for getting blob upload status is expensive. + Tracker StatusTracker + // Authorizer is used to authorize registry requests + // Deprecated: use Hosts Authorizer Authorizer // Credentials provides username and secret given a host. // If username is empty but a secret is given, that secret // is interpreted as a long lived token. - // Deprecated: use Authorizer + // Deprecated: use Hosts Credentials func(string) (string, string, error) // Host provides the hostname given a namespace. + // Deprecated: use Hosts Host func(string) (string, error) - // Headers are the HTTP request header fields sent by the resolver - Headers http.Header - // PlainHTTP specifies to use plain http and not https + // Deprecated: use Hosts PlainHTTP bool // Client is the http client to used when making registry requests + // Deprecated: use Hosts Client *http.Client - - // Tracker is used to track uploads to the registry. This is used - // since the registry does not have upload tracking and the existing - // mechanism for getting blob upload status is expensive. - Tracker StatusTracker } // DefaultHost is the default host function. 
@@ -108,12 +129,10 @@ func DefaultHost(ns string) (string, error) { } type dockerResolver struct { - auth Authorizer - host func(string) (string, error) - headers http.Header - plainHTTP bool - client *http.Client - tracker StatusTracker + hosts RegistryHosts + header http.Header + resolveHeader http.Header + tracker StatusTracker } // NewResolver returns a new resolver to a Docker registry @@ -121,33 +140,56 @@ func NewResolver(options ResolverOptions) remotes.Resolver { if options.Tracker == nil { options.Tracker = NewInMemoryTracker() } - if options.Host == nil { - options.Host = DefaultHost - } + if options.Headers == nil { options.Headers = make(http.Header) } + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + + resolveHeader := http.Header{} if _, ok := options.Headers["Accept"]; !ok { // set headers for all the types we support for resolution. - options.Headers.Set("Accept", strings.Join([]string{ + resolveHeader.Set("Accept", strings.Join([]string{ images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex, "*"}, ", ")) + } else { + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") } - if _, ok := options.Headers["User-Agent"]; !ok { - options.Headers.Set("User-Agent", "containerd/"+version.Version) - } - if options.Authorizer == nil { - options.Authorizer = NewAuthorizer(options.Client, options.Credentials) + + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, 
WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) } return &dockerResolver{ - auth: options.Authorizer, - host: options.Host, - headers: options.Headers, - plainHTTP: options.PlainHTTP, - client: options.Client, - tracker: options.Tracker, + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, } } @@ -194,13 +236,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, err } - fetcher := dockerFetcher{ - dockerBase: base, - } - var ( - urls []string - dgst = refspec.Digest() + lastErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull ) if dgst != "" { @@ -211,100 +251,130 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } // turns out, we have a valid digest, make a url. - urls = append(urls, fetcher.url("manifests", dgst.String())) + paths = append(paths, []string{"manifests", dgst.String()}) // fallback to blobs on not found. 
- urls = append(urls, fetcher.url("blobs", dgst.String())) + paths = append(paths, []string{"blobs", dgst.String()}) } else { - urls = append(urls, fetcher.url("manifests", refspec.Object)) + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") } ctx, err = contextWithRepositoryScope(ctx, refspec, false) if err != nil { return "", ocispec.Descriptor{}, err } - for _, u := range urls { - req, err := http.NewRequest(http.MethodHead, u, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - req.Header = r.headers + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) - log.G(ctx).Debug("resolving") - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - if errors.Cause(err) == ErrInvalidAuthorization { - err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") - } - return "", ocispec.Descriptor{}, err - } - resp.Body.Close() // don't care about body contents. - - if resp.StatusCode > 299 { - if resp.StatusCode == http.StatusNotFound { - continue + req := base.request(host, http.MethodHead, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) } - return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) - } - size := resp.ContentLength - - // this is the only point at which we trust the registry. we use the - // content headers to assemble a descriptor for the name. when this becomes - // more robust, we mostly get this information from a secure trust store. 
- dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) - contentType := getManifestMediaType(resp) - if dgstHeader != "" && size != -1 { - if err := dgstHeader.Validate(); err != nil { - return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) - } - dgst = dgstHeader - } else { - log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - - req, err := http.NewRequest(http.MethodGet, u, nil) + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) if err != nil { + if errors.Cause(err) == ErrInvalidAuthorization { + err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + } return "", ocispec.Descriptor{}, err } - req.Header = r.headers + resp.Body.Close() // don't care about body contents. - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - return "", ocispec.Descriptor{}, err + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + continue + } + return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) } - defer resp.Body.Close() + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. 
+ dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - bodyReader := countingReader{reader: resp.Body} + req = base.request(host, http.MethodGet, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } - contentType = getManifestMediaType(resp) - if contentType == images.MediaTypeDockerSchema1Manifest { - b, err := schema1.ReadStripSignature(&bodyReader) + resp, err := req.doWithRetries(ctx, nil) if err != nil { return "", ocispec.Descriptor{}, err } - - dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { + defer resp.Body.Close() + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + if dgst == "" { + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + dgst = digest.FromBytes(b) + } else { + dgst, err = digest.FromReader(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + } + } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { return "", ocispec.Descriptor{}, err } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if lastErr == nil { + lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + } + continue + } + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, } - size = bodyReader.bytesRead - } - desc := ocispec.Descriptor{ - Digest: dgst, - MediaType: contentType, - Size: 
size, + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil } + } - log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") - return ref, desc, nil + if lastErr == nil { + lastErr = errors.Wrap(errdefs.ErrNotFound, ref) } - return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref) + return "", ocispec.Descriptor{}, lastErr } func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { @@ -329,13 +399,6 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return nil, err } - // Manifests can be pushed by digest like any other object, but the passed in - // reference cannot take a digest without the associated content. A tag is allowed - // and will be used to tag pushed manifests. - if refspec.Object != "" && strings.Contains(refspec.Object, "@") { - return nil, errors.New("cannot use digest reference for push locator") - } - base, err := r.base(refspec) if err != nil { return nil, err @@ -343,60 +406,64 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return dockerPusher{ dockerBase: base, - tag: refspec.Object, + object: refspec.Object, tracker: r.tracker, }, nil } type dockerBase struct { - refspec reference.Spec - base url.URL - - client *http.Client - auth Authorizer + refspec reference.Spec + namespace string + hosts []RegistryHost + header http.Header } func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { - var ( - err error - base url.URL - ) - host := refspec.Hostname() - base.Host = host - if r.host != nil { - base.Host, err = r.host(host) - if err != nil { - return nil, err - } - } - - base.Scheme = "https" - if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") { - base.Scheme = "http" + hosts, err := r.hosts(host) + if err != nil { + return nil, err } - - prefix := strings.TrimPrefix(refspec.Locator, host+"/") - base.Path = path.Join("/v2", prefix) - return 
&dockerBase{ - refspec: refspec, - base: base, - client: r.client, - auth: r.auth, + refspec: refspec, + namespace: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, }, nil } -func (r *dockerBase) url(ps ...string) string { - url := r.base - url.Path = path.Join(url.Path, path.Join(ps...)) - return url.String() +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return +} + +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := http.Header{} + for key, value := range r.header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.namespace}, ps...) + p := path.Join(parts...) + // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } } -func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { +func (r *request) authorize(ctx context.Context, req *http.Request) error { // Check if has header for host - if r.auth != nil { - if err := r.auth.Authorize(ctx, req); err != nil { + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { return err } } @@ -404,80 +471,137 @@ func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { return nil } -func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) - log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request") +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} 
+ +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequest(r.method, u, nil) + if err != nil { + return nil, err + } + req.Header = r.header + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") if err := r.authorize(ctx, req); err != nil { return nil, errors.Wrap(err, "failed to authorize") } - resp, err := ctxhttp.Do(ctx, r.client, req) + resp, err := ctxhttp.Do(ctx, r.host.Client, req) if err != nil { return nil, errors.Wrap(err, "failed to do request") } - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "response.headers": resp.Header, - }).Debug("fetch response received") + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil } -func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) { - resp, err := r.doRequest(ctx, req) +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) if err != nil { return nil, err } responses = append(responses, resp) - req, err = r.retryRequest(ctx, req, responses) + retry, err := r.retryRequest(ctx, responses) if err != nil { resp.Body.Close() return nil, err } - if req != nil { + if retry { resp.Body.Close() - return r.doRequestWithRetries(ctx, req, responses) + return r.doWithRetries(ctx, responses) } return resp, err } -func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) { +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { if len(responses) > 5 { - return nil, 
nil + return false, nil } last := responses[len(responses)-1] - if last.StatusCode == http.StatusUnauthorized { + switch last.StatusCode { + case http.StatusUnauthorized: log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") - if r.auth != nil { - if err := r.auth.AddResponses(ctx, responses); err == nil { - return copyRequest(req) + if r.host.Authorizer != nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil } else if !errdefs.IsNotImplemented(err) { - return nil, err + return false, err } } - return nil, nil - } else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead { + return false, nil + case http.StatusMethodNotAllowed: // Support registries which have not properly implemented the HEAD method for // manifests endpoint - if strings.Contains(req.URL.Path, "/manifests/") { - // TODO: copy request? - req.Method = http.MethodGet - return copyRequest(req) + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil } // TODO: Handle 50x errors accounting for attempt history - return nil, nil + return false, nil } -func copyRequest(req *http.Request) (*http.Request, error) { - ireq := *req - if ireq.GetBody != nil { - var err error - ireq.Body, err = ireq.GetBody() - if err != nil { - return nil, err +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) logrus.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." 
+ k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v } } - return &ireq, nil + + return logrus.Fields(fields) +} + +func responseFields(resp *http.Response) logrus.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index 29b41cc143702..8314c01d5a6fc 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -216,12 +216,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De ref := remotes.MakeRefKey(ctx, desc) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest") } ref = remotes.MakeRefKey(ctx, config) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config") } return desc, nil diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go index 52c2443118846..86bd81bf5329a 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -18,6 +18,7 @@ package docker import ( "context" + 
"fmt" "net/url" "sort" "strings" @@ -53,24 +54,38 @@ func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, pus return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil } -// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"]. -func getTokenScopes(ctx context.Context, params map[string]string) []string { +// contextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + var scopes []string + + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = append(scopes, v.([]string)...) + } + scopes = append(scopes, fmt.Sprintf("repository:%s:pull", repo)) + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func getTokenScopes(ctx context.Context, common []string) []string { var scopes []string if x := ctx.Value(tokenScopesKey{}); x != nil { scopes = append(scopes, x.([]string)...) } - if scope, ok := params["scope"]; ok { - for _, s := range scopes { - // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) - // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. - if s == scope { - // already appended - goto Sort - } + + scopes = append(scopes, common...) + sort.Strings(scopes) + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. 
+ if scopes[l] == scopes[idx] { + continue } - scopes = append(scopes, scope) + + l++ + scopes[l] = scopes[idx] } -Sort: - sort.Strings(scopes) - return scopes + return scopes[:l+1] } diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 0ee56c887ad6e..4dee1f61c8a1c 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -48,7 +48,8 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: + ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip, + images.MediaTypeDockerSchema2LayerEnc, images.MediaTypeDockerSchema2LayerGzipEnc: return "layer-" + desc.Digest.String() case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: return "config-" + desc.Digest.String() @@ -156,7 +157,7 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // // Base handlers can be provided which will be called before any push specific // handlers. 
-func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { var m sync.Mutex manifestStack := []ocispec.Descriptor{} @@ -173,10 +174,14 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr } }) - pushHandler := PushHandler(pusher, provider) + pushHandler := PushHandler(pusher, store) + + platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) + + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) var handler images.Handler = images.Handlers( - images.FilterPlatforms(images.ChildrenHandler(provider), platform), + annotateHandler, filterHandler, pushHandler, ) @@ -241,3 +246,45 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) return descs, nil } } + +// annotateDistributionSourceHandler add distribution source label into +// annotation of config or blob descriptor. 
+func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + info, err := manager.Info(ctx, child.Digest) + if err != nil { + return nil, err + } + + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index 3ea830f6b4236..73d4ccca52e0c 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -48,6 +48,14 @@ type Layer struct { // Layers are applied in order they are given, making the first layer the // bottom-most layer in the layer chain. func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) { + return ApplyLayersWithOpts(ctx, layers, sn, a, nil) +} + +// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts. +// The returned result is a chain id digest representing all the applied layers. +// Layers are applied in order they are given, making the first layer the +// bottom-most layer in the layer chain. 
+func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) { chain := make([]digest.Digest, len(layers)) for i, layer := range layers { chain[i] = layer.Diff.Digest @@ -63,7 +71,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) { + if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } } @@ -75,6 +83,13 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, // using the provided snapshotter and applier. If the layer was unpacked true // is returned, if the layer already exists false is returned. func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) { + return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil) +} + +// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain, +// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true +// is returned, if the layer already exists false is returned. 
+func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) { var ( chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String() applied bool @@ -84,7 +99,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil { + if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return false, err } @@ -93,9 +108,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap } } return applied, nil + } -func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error { +func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error { var ( parent = identity.ChainID(chain[:len(chain)-1]) chainID = identity.ChainID(chain) @@ -113,7 +129,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) if err != nil { if errdefs.IsNotFound(err) && len(layers) > 1 { - if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil { + if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return err } @@ -144,7 +160,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn } }() - diff, err = a.Apply(ctx, layer.Blob, mounts) + diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...) 
if err != nil { err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest) return err diff --git a/vendor/github.com/containerd/containerd/runtime/task.go b/vendor/github.com/containerd/containerd/runtime/task.go index 981e290c68d12..ab9017ba58a7e 100644 --- a/vendor/github.com/containerd/containerd/runtime/task.go +++ b/vendor/github.com/containerd/containerd/runtime/task.go @@ -33,6 +33,7 @@ type TaskInfo struct { // Process is a runtime object for an executing process inside a container type Process interface { + // ID of the process ID() string // State returns the process state State(context.Context) (State, error) @@ -54,6 +55,8 @@ type Process interface { type Task interface { Process + // PID of the process + PID() uint32 // Namespace that the task exists in Namespace() string // Pause pauses the container process diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go index ae0e73f287800..e8b629b79c56c 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go @@ -65,6 +65,10 @@ func newBundle(id, path, workDir string, spec []byte) (b *bundle, err error) { os.RemoveAll(workDir) } }() + rootfs := filepath.Join(path, "rootfs") + if err := os.MkdirAll(rootfs, 0711); err != nil { + return nil, err + } err = ioutil.WriteFile(filepath.Join(path, configFilename), spec, 0666) return &bundle{ id: id, @@ -179,6 +183,9 @@ func atomicDelete(path string) error { // create a hidden dir for an atomic removal atomicPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) if err := os.Rename(path, atomicPath); err != nil { + if os.IsNotExist(err) { + return nil + } return err } return os.RemoveAll(atomicPath) diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go 
b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go index c408126aee720..0243c3986b2ce 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go @@ -37,12 +37,12 @@ import ( "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/containerd/containerd/runtime/v1" - "github.com/containerd/containerd/runtime/v1/linux/proc" + v1 "github.com/containerd/containerd/runtime/v1" shim "github.com/containerd/containerd/runtime/v1/shim/v1" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" @@ -191,18 +191,13 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts } exitHandler := func() { log.G(ctx).WithField("id", id).Info("shim reaped") - t, err := r.tasks.Get(ctx, id) - if err != nil { + + if _, err := r.tasks.Get(ctx, id); err != nil { // Task was never started or was already successfully deleted return } - lc := t.(*Task) - log.G(ctx).WithFields(logrus.Fields{ - "id": id, - "namespace": namespace, - }).Warn("cleaning up after killed shim") - if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id, lc.pid); err != nil { + if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id); err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ "id": id, "namespace": namespace, @@ -330,20 +325,26 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { continue } id := path.Name() + // skip hidden directories + if len(id) > 0 && id[0] == '.' 
{ + continue + } bundle := loadBundle( id, filepath.Join(r.state, ns, id), filepath.Join(r.root, ns, id), ) ctx = namespaces.WithNamespace(ctx, ns) - pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile)) + shimExit := make(chan struct{}) s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() { - _, err := r.tasks.Get(ctx, id) - if err != nil { + defer close(shimExit) + if _, err := r.tasks.Get(ctx, id); err != nil { // Task was never started or was already successfully deleted return } - if err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid); err != nil { + + if err := r.cleanupAfterDeadShim(ctx, bundle, ns, id); err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). Error("cleaning up after dead shim") } @@ -353,7 +354,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { "id": id, "namespace": ns, }).Error("connecting to shim") - err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) + err := r.cleanupAfterDeadShim(ctx, bundle, ns, id) if err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). 
Error("cleaning up after dead shim") @@ -363,6 +364,18 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { logDirPath := filepath.Join(r.root, ns, id) + copyAndClose := func(dst io.Writer, src io.ReadWriteCloser) { + copyDone := make(chan struct{}) + go func() { + io.Copy(dst, src) + close(copyDone) + }() + select { + case <-shimExit: + case <-copyDone: + } + src.Close() + } shimStdoutLog, err := v1.OpenShimStdoutLog(ctx, logDirPath) if err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ @@ -372,7 +385,11 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { }).Error("opening shim stdout log pipe") continue } - go io.Copy(os.Stdout, shimStdoutLog) + if r.config.ShimDebug { + go copyAndClose(os.Stdout, shimStdoutLog) + } else { + go copyAndClose(ioutil.Discard, shimStdoutLog) + } shimStderrLog, err := v1.OpenShimStderrLog(ctx, logDirPath) if err != nil { @@ -383,7 +400,11 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { }).Error("opening shim stderr log pipe") continue } - go io.Copy(os.Stderr, shimStderrLog) + if r.config.ShimDebug { + go copyAndClose(os.Stderr, shimStderrLog) + } else { + go copyAndClose(ioutil.Discard, shimStderrLog) + } t, err := newTask(id, ns, pid, s, r.events, r.tasks, bundle) if err != nil { @@ -395,7 +416,13 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { return o, nil } -func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string, pid int) error { +func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string) error { + log.G(ctx).WithFields(logrus.Fields{ + "id": id, + "namespace": ns, + }).Warn("cleaning up after shim dead") + + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile)) ctx = namespaces.WithNamespace(ctx, ns) if err := r.terminate(ctx, bundle, ns, id); err != nil { if r.config.ShimDebug { @@ -418,6 +445,10 @@ 
func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, if err := bundle.Delete(); err != nil { log.G(ctx).WithError(err).Error("delete bundle") } + // kill shim + if shimPid, err := runc.ReadPidFile(filepath.Join(bundle.path, "shim.pid")); err == nil && shimPid > 0 { + unix.Kill(shimPid, unix.SIGKILL) + } r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ ContainerID: id, @@ -456,7 +487,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er var ( cmd = r.config.Runtime - root = proc.RuncRoot + root = process.RuncRoot ) if ropts != nil { if ropts.Runtime != "" { diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go index e13255e955d2f..0970c3ea3b1a0 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go @@ -84,6 +84,11 @@ func (t *Task) Namespace() string { return t.namespace } +// PID of the task +func (t *Task) PID() uint32 { + return uint32(t.pid) +} + // Delete the task and return the exit status func (t *Task) Delete(ctx context.Context) (*runtime.Exit, error) { rsp, err := t.shim.Delete(ctx, empty) @@ -124,11 +129,15 @@ func (t *Task) Start(ctx context.Context) error { t.pid = int(r.Pid) if !hasCgroup { cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid)) - if err != nil { + if err != nil && err != cgroups.ErrCgroupDeleted { return err } t.mu.Lock() - t.cg = cg + if err == cgroups.ErrCgroupDeleted { + t.cg = nil + } else { + t.cg = cg + } t.mu.Unlock() } t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{ diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go index 6cdd9cfc2cd14..7c68248c52948 100644 --- 
a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go @@ -26,6 +26,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "sync" "syscall" @@ -98,9 +99,9 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa cmd.Wait() exitHandler() if stdoutLog != nil { - stderrLog.Close() + stdoutLog.Close() } - if stdoutLog != nil { + if stderrLog != nil { stderrLog.Close() } }() @@ -110,7 +111,10 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa "debug": debug, }).Infof("shim %s started", binary) - if err := writeAddress(filepath.Join(config.Path, "address"), address); err != nil { + if err := writeFile(filepath.Join(config.Path, "address"), address); err != nil { + return nil, nil, err + } + if err := writeFile(filepath.Join(config.Path, "shim.pid"), strconv.Itoa(cmd.Process.Pid)); err != nil { return nil, nil, err } // set shim in cgroup if it is provided @@ -123,8 +127,8 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa "address": address, }).Infof("shim placed in cgroup %s", cgroup) } - if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil { - return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim") + if err = setupOOMScore(cmd.Process.Pid); err != nil { + return nil, nil, err } c, clo, err := WithConnect(address, func() {})(ctx, config) if err != nil { @@ -134,6 +138,21 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa } } +// setupOOMScore gets containerd's oom score and adds +1 to it +// to ensure a shim has a lower* score than the daemons +func setupOOMScore(shimPid int) error { + pid := os.Getpid() + score, err := sys.GetOOMScoreAdj(pid) + if err != nil { + return errors.Wrap(err, "get daemon OOM score") + } + shimScore := score + 1 + if err := sys.SetOOMScore(shimPid, shimScore); err != nil { 
+ return errors.Wrap(err, "set shim OOM score") + } + return nil +} + func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File, stdout, stderr io.Writer) (*exec.Cmd, error) { selfExe, err := os.Executable() if err != nil { @@ -172,8 +191,8 @@ func newCommand(binary, daemonAddress string, debug bool, config shim.Config, so return cmd, nil } -// writeAddress writes a address file atomically -func writeAddress(path, address string) error { +// writeFile writes a address file atomically +func writeFile(path, address string) error { path, err := filepath.Abs(path) if err != nil { return err @@ -279,7 +298,7 @@ func (c *Client) KillShim(ctx context.Context) error { return c.signalShim(ctx, unix.SIGKILL) } -// Close the cient connection +// Close the client connection func (c *Client) Close() error { if c.c == nil { return nil diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go index 4d2578ad085b9..f557001350936 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go @@ -35,10 +35,10 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - rproc "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/containerd/runtime/v1/linux/proc" shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" @@ -84,7 +84,7 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { s := &Service{ config: config, context: ctx, - processes: make(map[string]rproc.Process), + 
processes: make(map[string]process.Process), events: make(chan interface{}, 128), ec: Default.Subscribe(), } @@ -102,9 +102,9 @@ type Service struct { config Config context context.Context - processes map[string]rproc.Process + processes map[string]process.Process events chan interface{} - platform rproc.Platform + platform stdio.Platform ec chan runc.Exit // Filled by Create() @@ -114,9 +114,9 @@ type Service struct { // Create a new initial process and container with the underlying OCI runtime func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { - var mounts []proc.Mount + var mounts []process.Mount for _, m := range r.Rootfs { - mounts = append(mounts, proc.Mount{ + mounts = append(mounts, process.Mount{ Type: m.Type, Source: m.Source, Target: m.Target, @@ -127,12 +127,12 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * rootfs := "" if len(mounts) > 0 { rootfs = filepath.Join(r.Bundle, "rootfs") - if err := os.Mkdir(rootfs, 0711); err != nil { + if err := os.Mkdir(rootfs, 0711); err != nil && !os.IsExist(err) { return nil, err } } - config := &proc.CreateConfig{ + config := &process.CreateConfig{ ID: r.ID, Bundle: r.Bundle, Runtime: r.Runtime, @@ -266,7 +266,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + process, err := p.(*process.Init).Exec(ctx, s.config.Path, &process.ExecConfig{ ID: r.ID, Terminal: r.Terminal, Stdin: r.Stdin, @@ -348,7 +348,7 @@ func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, er if err != nil { return nil, err } - if err := p.(*proc.Init).Pause(ctx); err != nil { + if err := p.(*process.Init).Pause(ctx); err != nil { return nil, err } return empty, nil @@ -360,7 +360,7 @@ func (s *Service) Resume(ctx 
context.Context, r *ptypes.Empty) (*ptypes.Empty, e if err != nil { return nil, err } - if err := p.(*proc.Init).Resume(ctx); err != nil { + if err := p.(*process.Init).Resume(ctx); err != nil { return nil, err } return empty, nil @@ -448,7 +448,7 @@ func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskReque } options = *v.(*runctypes.CheckpointOptions) } - if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{ + if err := p.(*process.Init).Checkpoint(ctx, &process.CheckpointConfig{ Path: r.Path, Exit: options.Exit, AllowOpenTCP: options.OpenTcp, @@ -476,7 +476,7 @@ func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*pt if err != nil { return nil, err } - if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil { + if err := p.(*process.Init).Update(ctx, r.Resources); err != nil { return nil, errdefs.ToGRPC(err) } return empty, nil @@ -502,11 +502,11 @@ func (s *Service) processExits() { } } -func (s *Service) allProcesses() []rproc.Process { +func (s *Service) allProcesses() []process.Process { s.mu.Lock() defer s.mu.Unlock() - res := make([]rproc.Process, 0, len(s.processes)) + res := make([]process.Process, 0, len(s.processes)) for _, p := range s.processes { res = append(res, p) } @@ -523,7 +523,7 @@ func (s *Service) checkProcesses(e runc.Exit) { if p.Pid() == e.Pid { if shouldKillAll { - if ip, ok := p.(*proc.Init); ok { + if ip, ok := p.(*process.Init); ok { // Ensure all children are killed if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). 
@@ -554,7 +554,7 @@ func shouldKillAllOnExit(bundlePath string) (bool, error) { if bundleSpec.Linux != nil { for _, ns := range bundleSpec.Linux.Namespaces { - if ns.Type == specs.PIDNamespace { + if ns.Type == specs.PIDNamespace && ns.Path == "" { return false, nil } } @@ -569,7 +569,7 @@ func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, er return nil, err } - ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + ps, err := p.(*process.Init).Runtime().Ps(ctx, id) if err != nil { return nil, err } @@ -589,7 +589,7 @@ func (s *Service) forward(publisher events.Publisher) { } // getInitProcess returns initial process -func (s *Service) getInitProcess() (rproc.Process, error) { +func (s *Service) getInitProcess() (process.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -601,7 +601,7 @@ func (s *Service) getInitProcess() (rproc.Process, error) { } // getExecProcess returns exec process -func (s *Service) getExecProcess(id string) (rproc.Process, error) { +func (s *Service) getExecProcess(id string) (process.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -640,7 +640,7 @@ func getTopic(ctx context.Context, e interface{}) string { return runtime.TaskUnknownTopic } -func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform rproc.Platform, r *proc.CreateConfig, rootfs string) (*proc.Init, error) { +func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform stdio.Platform, r *process.CreateConfig, rootfs string) (*process.Init, error) { var options runctypes.CreateOptions if r.Options != nil { v, err := typeurl.UnmarshalAny(r.Options) @@ -650,8 +650,8 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu st options = *v.(*runctypes.CreateOptions) } - runtime := proc.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) - p := proc.New(r.ID, runtime, rproc.Stdio{ + runtime := 
process.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) + p := process.New(r.ID, runtime, stdio.Stdio{ Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, diff --git a/vendor/github.com/containerd/containerd/services/diff/local.go b/vendor/github.com/containerd/containerd/services/diff/local.go index 0cb6222c5a396..f05b222dba266 100644 --- a/vendor/github.com/containerd/containerd/services/diff/local.go +++ b/vendor/github.com/containerd/containerd/services/diff/local.go @@ -99,8 +99,13 @@ func (l *local) Apply(ctx context.Context, er *diffapi.ApplyRequest, _ ...grpc.C mounts = toMounts(er.Mounts) ) + var opts []diff.ApplyOpt + if er.Payloads != nil { + opts = append(opts, diff.WithPayloads(er.Payloads)) + } + for _, differ := range l.differs { - ocidesc, err = differ.Apply(ctx, desc, mounts) + ocidesc, err = differ.Apply(ctx, desc, mounts, opts...) if !errdefs.IsNotImplemented(err) { break } @@ -164,16 +169,18 @@ func toMounts(apim []*types.Mount) []mount.Mount { func toDescriptor(d *types.Descriptor) ocispec.Descriptor { return ocispec.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size: d.Size_, + MediaType: d.MediaType, + Digest: d.Digest, + Size: d.Size_, + Annotations: d.Annotations, } } func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { return &types.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size_: d.Size, + MediaType: d.MediaType, + Digest: d.Digest, + Size_: d.Size, + Annotations: d.Annotations, } } diff --git a/vendor/github.com/containerd/containerd/services/images/helpers.go b/vendor/github.com/containerd/containerd/services/images/helpers.go index 8ad0d117e4f80..2d4ec76dc542b 100644 --- a/vendor/github.com/containerd/containerd/services/images/helpers.go +++ b/vendor/github.com/containerd/containerd/services/images/helpers.go @@ -55,16 +55,18 @@ func imageFromProto(imagepb *imagesapi.Image) images.Image { func descFromProto(desc *types.Descriptor) ocispec.Descriptor { return 
ocispec.Descriptor{ - MediaType: desc.MediaType, - Size: desc.Size_, - Digest: desc.Digest, + MediaType: desc.MediaType, + Size: desc.Size_, + Digest: desc.Digest, + Annotations: desc.Annotations, } } func descToProto(desc *ocispec.Descriptor) types.Descriptor { return types.Descriptor{ - MediaType: desc.MediaType, - Size_: desc.Size, - Digest: desc.Digest, + MediaType: desc.MediaType, + Size_: desc.Size, + Digest: desc.Digest, + Annotations: desc.Annotations, } } diff --git a/vendor/github.com/containerd/containerd/services/leases/local.go b/vendor/github.com/containerd/containerd/services/leases/local.go index 0cb3108379593..fcc621d4d3994 100644 --- a/vendor/github.com/containerd/containerd/services/leases/local.go +++ b/vendor/github.com/containerd/containerd/services/leases/local.go @@ -107,3 +107,27 @@ func (l *local) List(ctx context.Context, filters ...string) ([]leases.Lease, er } return ll, nil } + +func (l *local) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + return l.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).AddResource(ctx, lease, r) + }) +} + +func (l *local) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + return l.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).DeleteResource(ctx, lease, r) + }) +} + +func (l *local) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + var rs []leases.Resource + if err := l.db.View(func(tx *bolt.Tx) error { + var err error + rs, err = metadata.NewLeaseManager(tx).ListResources(ctx, lease) + return err + }); err != nil { + return nil, err + } + return rs, nil +} diff --git a/vendor/github.com/containerd/containerd/services/leases/service.go b/vendor/github.com/containerd/containerd/services/leases/service.go index cc918d32db177..8dcc9f746e9ba 100644 --- a/vendor/github.com/containerd/containerd/services/leases/service.go +++ 
b/vendor/github.com/containerd/containerd/services/leases/service.go @@ -113,6 +113,56 @@ func (s *service) List(ctx context.Context, r *api.ListRequest) (*api.ListRespon }, nil } +func (s *service) AddResource(ctx context.Context, r *api.AddResourceRequest) (*ptypes.Empty, error) { + lease := leases.Lease{ + ID: r.ID, + } + + if err := s.lm.AddResource(ctx, lease, leases.Resource{ + ID: r.Resource.ID, + Type: r.Resource.Type, + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return &ptypes.Empty{}, nil +} + +func (s *service) DeleteResource(ctx context.Context, r *api.DeleteResourceRequest) (*ptypes.Empty, error) { + lease := leases.Lease{ + ID: r.ID, + } + + if err := s.lm.DeleteResource(ctx, lease, leases.Resource{ + ID: r.Resource.ID, + Type: r.Resource.Type, + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return &ptypes.Empty{}, nil +} + +func (s *service) ListResources(ctx context.Context, r *api.ListResourcesRequest) (*api.ListResourcesResponse, error) { + lease := leases.Lease{ + ID: r.ID, + } + + rs, err := s.lm.ListResources(ctx, lease) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + apiResources := make([]api.Resource, 0, len(rs)) + for _, i := range rs { + apiResources = append(apiResources, api.Resource{ + ID: i.ID, + Type: i.Type, + }) + } + return &api.ListResourcesResponse{ + Resources: apiResources, + }, nil +} + func leaseToGRPC(l leases.Lease) *api.Lease { return &api.Lease{ ID: l.ID, diff --git a/vendor/github.com/containerd/containerd/services/server/config/config.go b/vendor/github.com/containerd/containerd/services/server/config/config.go index 26fb92599307d..365dfa0fd7232 100644 --- a/vendor/github.com/containerd/containerd/services/server/config/config.go +++ b/vendor/github.com/containerd/containerd/services/server/config/config.go @@ -17,13 +17,18 @@ package config import ( + "strings" + "github.com/BurntSushi/toml" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/plugin" 
"github.com/pkg/errors" ) // Config provides containerd configuration data for the server type Config struct { + // Version of the config file + Version int `toml:"version"` // Root is the path to a directory where containerd will store persistent data Root string `toml:"root"` // State is the path to a directory where containerd will store transient data @@ -51,9 +56,61 @@ type Config struct { // ProxyPlugins configures plugins which are communicated to over GRPC ProxyPlugins map[string]ProxyPlugin `toml:"proxy_plugins"` + StreamProcessors []StreamProcessor `toml:"stream_processors"` + md toml.MetaData } +// StreamProcessor provides configuration for diff content processors +type StreamProcessor struct { + // ID of the processor, also used to fetch the specific payload + ID string `toml:"id"` + // Accepts specific media-types + Accepts []string `toml:"accepts"` + // Returns the media-type + Returns string `toml:"returns"` + // Path or name of the binary + Path string `toml:"path"` + // Args to the binary + Args []string `toml:"args"` +} + +// GetVersion returns the config file's version +func (c *Config) GetVersion() int { + if c.Version == 0 { + return 1 + } + return c.Version +} + +// ValidateV2 validates the config for a v2 file +func (c *Config) ValidateV2() error { + if c.GetVersion() != 2 { + return nil + } + for _, p := range c.DisabledPlugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p) + } + } + for _, p := range c.RequiredPlugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p) + } + } + for p := range c.Plugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) + } + } + for p := range c.ProxyPlugins { + if len(strings.Split(p, ".")) < 4 { + return errors.Errorf("invalid proxy plugin key URI %q expect io.containerd.x.vx", p) + } 
+ } + return nil +} + // GRPCConfig provides GRPC configuration for the socket type GRPCConfig struct { Address string `toml:"address"` @@ -130,15 +187,19 @@ func (bc *BoltConfig) Validate() error { } // Decode unmarshals a plugin specific configuration by plugin id -func (c *Config) Decode(id string, v interface{}) (interface{}, error) { +func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { + id := p.URI() + if c.GetVersion() == 1 { + id = p.ID + } data, ok := c.Plugins[id] if !ok { - return v, nil + return p.Config, nil } - if err := c.md.PrimitiveDecode(data, v); err != nil { + if err := c.md.PrimitiveDecode(data, p.Config); err != nil { return nil, err } - return v, nil + return p.Config, nil } // LoadConfig loads the containerd server config from the provided path @@ -151,5 +212,29 @@ func LoadConfig(path string, v *Config) error { return err } v.md = md - return nil + return v.ValidateV2() +} + +// V1DisabledFilter matches based on ID +func V1DisabledFilter(list []string) plugin.DisableFilter { + set := make(map[string]struct{}, len(list)) + for _, l := range list { + set[l] = struct{}{} + } + return func(r *plugin.Registration) bool { + _, ok := set[r.ID] + return ok + } +} + +// V2DisabledFilter matches based on URI +func V2DisabledFilter(list []string) plugin.DisableFilter { + set := make(map[string]struct{}, len(list)) + for _, l := range list { + set[l] = struct{}{} + } + return func(r *plugin.Registration) bool { + _, ok := set[r.URI()] + return ok + } } diff --git a/vendor/github.com/containerd/containerd/services/server/server.go b/vendor/github.com/containerd/containerd/services/server/server.go index 6ed429146902f..0e6923918ba26 100644 --- a/vendor/github.com/containerd/containerd/services/server/server.go +++ b/vendor/github.com/containerd/containerd/services/server/server.go @@ -35,6 +35,7 @@ import ( "github.com/containerd/containerd/content/local" csproxy "github.com/containerd/containerd/content/proxy" 
"github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/diff" "github.com/containerd/containerd/events/exchange" "github.com/containerd/containerd/log" "github.com/containerd/containerd/metadata" @@ -43,11 +44,14 @@ import ( srvconfig "github.com/containerd/containerd/services/server/config" "github.com/containerd/containerd/snapshots" ssproxy "github.com/containerd/containerd/snapshots/proxy" + "github.com/containerd/containerd/sys" + "github.com/containerd/ttrpc" metrics "github.com/docker/go-metrics" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pkg/errors" bolt "go.etcd.io/bbolt" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) // CreateTopLevelDirectories creates the top-level root and state directories. @@ -61,13 +65,11 @@ func CreateTopLevelDirectories(config *srvconfig.Config) error { return errors.New("root and state must be different paths") } - if err := os.MkdirAll(config.Root, 0711); err != nil { + if err := sys.MkdirAllWithACL(config.Root, 0711); err != nil { return err } - if err := os.MkdirAll(config.State, 0711); err != nil { - return err - } - return nil + + return sys.MkdirAllWithACL(config.State, 0711) } // New creates and initializes a new containerd server @@ -79,6 +81,9 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { if err != nil { return nil, err } + for _, p := range config.StreamProcessors { + diff.RegisterProcessor(diff.BinaryHandler(p.ID, p.Returns, p.Accepts, p.Path, p.Args)) + } serverOpts := []grpc.ServerOption{ grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), @@ -90,18 +95,46 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { if config.GRPC.MaxSendMsgSize > 0 { serverOpts = append(serverOpts, grpc.MaxSendMsgSize(config.GRPC.MaxSendMsgSize)) } - rpc := grpc.NewServer(serverOpts...) 
+ ttrpcServer, err := newTTRPCServer() + if err != nil { + return nil, err + } + tcpServerOpts := serverOpts + if config.GRPC.TCPTLSCert != "" { + log.G(ctx).Info("setting up tls on tcp GRPC services...") + creds, err := credentials.NewServerTLSFromFile(config.GRPC.TCPTLSCert, config.GRPC.TCPTLSKey) + if err != nil { + return nil, err + } + tcpServerOpts = append(tcpServerOpts, grpc.Creds(creds)) + } var ( - services []plugin.Service - s = &Server{ - rpc: rpc, - events: exchange.NewExchange(), - config: config, + grpcServer = grpc.NewServer(serverOpts...) + tcpServer = grpc.NewServer(tcpServerOpts...) + + grpcServices []plugin.Service + tcpServices []plugin.TCPService + ttrpcServices []plugin.TTRPCService + + s = &Server{ + grpcServer: grpcServer, + tcpServer: tcpServer, + ttrpcServer: ttrpcServer, + events: exchange.NewExchange(), + config: config, } initialized = plugin.NewPluginSet() + required = make(map[string]struct{}) ) + for _, r := range config.RequiredPlugins { + required[r] = struct{}{} + } for _, p := range plugins { id := p.URI() + reqID := id + if config.GetVersion() == 1 { + reqID = p.ID + } log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) initContext := plugin.NewContext( @@ -116,11 +149,11 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { // load the plugin specific configuration if it is provided if p.Config != nil { - pluginConfig, err := config.Decode(p.ID, p.Config) + pc, err := config.Decode(p) if err != nil { return nil, err } - initContext.Config = pluginConfig + initContext.Config = pc } result := p.Init(initContext) if err := initialized.Add(result); err != nil { @@ -134,17 +167,47 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { } else { log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) } + if _, ok := required[reqID]; ok { + return nil, errors.Wrapf(err, "load required plugin %s", id) + } continue } + + delete(required, reqID) // check for grpc 
services that should be registered with the server - if service, ok := instance.(plugin.Service); ok { - services = append(services, service) + if src, ok := instance.(plugin.Service); ok { + grpcServices = append(grpcServices, src) + } + if src, ok := instance.(plugin.TTRPCService); ok { + ttrpcServices = append(ttrpcServices, src) + } + if service, ok := instance.(plugin.TCPService); ok { + tcpServices = append(tcpServices, service) } + s.plugins = append(s.plugins, result) } + if len(required) != 0 { + var missing []string + for id := range required { + missing = append(missing, id) + } + return nil, errors.Errorf("required plugin %s not included", missing) + } + // register services after all plugins have been initialized - for _, service := range services { - if err := service.Register(rpc); err != nil { + for _, service := range grpcServices { + if err := service.Register(grpcServer); err != nil { + return nil, err + } + } + for _, service := range ttrpcServices { + if err := service.RegisterTTRPC(ttrpcServer); err != nil { + return nil, err + } + } + for _, service := range tcpServices { + if err := service.RegisterTCP(tcpServer); err != nil { return nil, err } } @@ -153,10 +216,12 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { // Server is the containerd main daemon type Server struct { - rpc *grpc.Server - events *exchange.Exchange - config *srvconfig.Config - plugins []*plugin.Plugin + grpcServer *grpc.Server + ttrpcServer *ttrpc.Server + tcpServer *grpc.Server + events *exchange.Exchange + config *srvconfig.Config + plugins []*plugin.Plugin } // ServeGRPC provides the containerd grpc APIs on the provided listener @@ -168,8 +233,13 @@ func (s *Server) ServeGRPC(l net.Listener) error { // before we start serving the grpc API register the grpc_prometheus metrics // handler. 
This needs to be the last service registered so that it can collect // metrics for every other service - grpc_prometheus.Register(s.rpc) - return trapClosedConnErr(s.rpc.Serve(l)) + grpc_prometheus.Register(s.grpcServer) + return trapClosedConnErr(s.grpcServer.Serve(l)) +} + +// ServeTTRPC provides the containerd ttrpc APIs on the provided listener +func (s *Server) ServeTTRPC(l net.Listener) error { + return trapClosedConnErr(s.ttrpcServer.Serve(context.Background(), l)) } // ServeMetrics provides a prometheus endpoint for exposing metrics @@ -179,6 +249,12 @@ func (s *Server) ServeMetrics(l net.Listener) error { return trapClosedConnErr(http.Serve(l, m)) } +// ServeTCP allows services to serve over tcp +func (s *Server) ServeTCP(l net.Listener) error { + grpc_prometheus.Register(s.tcpServer) + return trapClosedConnErr(s.tcpServer.Serve(l)) +} + // ServeDebug provides a debug endpoint func (s *Server) ServeDebug(l net.Listener) error { // don't use the default http server mux to make sure nothing gets registered @@ -195,12 +271,12 @@ func (s *Server) ServeDebug(l net.Listener) error { // Stop the containerd server canceling any open connections func (s *Server) Stop() { - s.rpc.Stop() + s.grpcServer.Stop() for i := len(s.plugins) - 1; i >= 0; i-- { p := s.plugins[i] instance, err := p.Instance() if err != nil { - log.L.WithError(err).WithField("id", p.Registration.ID). + log.L.WithError(err).WithField("id", p.Registration.URI()). Errorf("could not get plugin instance") continue } @@ -209,7 +285,7 @@ func (s *Server) Stop() { continue } if err := closer.Close(); err != nil { - log.L.WithError(err).WithField("id", p.Registration.ID). + log.L.WithError(err).WithField("id", p.Registration.URI()). Errorf("failed to close plugin") } } @@ -219,7 +295,11 @@ func (s *Server) Stop() { // of all plugins. 
func LoadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Registration, error) { // load all plugins into containerd - if err := plugin.Load(filepath.Join(config.Root, "plugins")); err != nil { + path := config.PluginDir + if path == "" { + path = filepath.Join(config.Root, "plugins") + } + if err := plugin.Load(path); err != nil { return nil, err } // load additional plugins that don't automatically register themselves @@ -345,8 +425,12 @@ func LoadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Regis } + filter := srvconfig.V2DisabledFilter + if config.GetVersion() == 1 { + filter = srvconfig.V1DisabledFilter + } // return the ordered graph for plugins - return plugin.Graph(config.DisabledPlugins), nil + return plugin.Graph(filter(config.DisabledPlugins)), nil } type proxyClients struct { @@ -366,7 +450,7 @@ func (pc *proxyClients) getClient(address string) (*grpc.ClientConn, error) { gopts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithBackoffMaxDelay(3 * time.Second), - grpc.WithDialer(dialer.Dialer), + grpc.WithContextDialer(dialer.ContextDialer), // TODO(stevvooe): We may need to allow configuration of this on the client. 
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), diff --git a/vendor/github.com/containerd/containerd/services/server/server_linux.go b/vendor/github.com/containerd/containerd/services/server/server_linux.go index 96b28a572c060..47692fed2c897 100644 --- a/vendor/github.com/containerd/containerd/services/server/server_linux.go +++ b/vendor/github.com/containerd/containerd/services/server/server_linux.go @@ -24,6 +24,7 @@ import ( "github.com/containerd/containerd/log" srvconfig "github.com/containerd/containerd/services/server/config" "github.com/containerd/containerd/sys" + "github.com/containerd/ttrpc" specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -53,3 +54,7 @@ func apply(ctx context.Context, config *srvconfig.Config) error { } return nil } + +func newTTRPCServer() (*ttrpc.Server, error) { + return ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) +} diff --git a/vendor/github.com/containerd/containerd/services/server/server_solaris.go b/vendor/github.com/containerd/containerd/services/server/server_solaris.go index f3182211f652e..35a637021d8fd 100644 --- a/vendor/github.com/containerd/containerd/services/server/server_solaris.go +++ b/vendor/github.com/containerd/containerd/services/server/server_solaris.go @@ -19,7 +19,7 @@ package server import ( "context" - srvconfig "github.com/containerd/containerd/server/config" + srvconfig "github.com/containerd/containerd/services/server/config" ) func apply(_ context.Context, _ *srvconfig.Config) error { diff --git a/vendor/github.com/containerd/containerd/services/server/server_unsupported.go b/vendor/github.com/containerd/containerd/services/server/server_unsupported.go index a6f1876510cfe..80674e69e709d 100644 --- a/vendor/github.com/containerd/containerd/services/server/server_unsupported.go +++ b/vendor/github.com/containerd/containerd/services/server/server_unsupported.go @@ -22,8 +22,13 @@ import ( "context" srvconfig 
"github.com/containerd/containerd/services/server/config" + "github.com/containerd/ttrpc" ) func apply(_ context.Context, _ *srvconfig.Config) error { return nil } + +func newTTRPCServer() (*ttrpc.Server, error) { + return ttrpc.NewServer() +} diff --git a/vendor/github.com/containerd/containerd/services/server/server_windows.go b/vendor/github.com/containerd/containerd/services/server/server_windows.go index e0dd19b1d25a8..8b569eb455abe 100644 --- a/vendor/github.com/containerd/containerd/services/server/server_windows.go +++ b/vendor/github.com/containerd/containerd/services/server/server_windows.go @@ -22,8 +22,13 @@ import ( "context" srvconfig "github.com/containerd/containerd/services/server/config" + "github.com/containerd/ttrpc" ) func apply(_ context.Context, _ *srvconfig.Config) error { return nil } + +func newTTRPCServer() (*ttrpc.Server, error) { + return ttrpc.NewServer() +} diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index b4af6a30863d1..514538f7ecc06 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -86,10 +86,15 @@ func (k *Kind) UnmarshalJSON(b []byte) error { // Info provides information about a particular snapshot. // JSON marshallability is supported for interactive with tools like ctr, type Info struct { - Kind Kind // active or committed snapshot - Name string // name or key of snapshot - Parent string `json:",omitempty"` // name of parent snapshot - Labels map[string]string `json:",omitempty"` // Labels for snapshot + Kind Kind // active or committed snapshot + Name string // name or key of snapshot + Parent string `json:",omitempty"` // name of parent snapshot + + // Labels for a snapshot. + // + // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited by the + // snapshotter's `Prepare`, `View`, or `Commit` calls. 
+ Labels map[string]string `json:",omitempty"` Created time.Time `json:",omitempty"` // Created time Updated time.Time `json:",omitempty"` // Last update time } diff --git a/vendor/github.com/containerd/containerd/task_opts_unix.go b/vendor/github.com/containerd/containerd/task_opts_unix.go index d3b51a76d1de7..8b498d47efc7d 100644 --- a/vendor/github.com/containerd/containerd/task_opts_unix.go +++ b/vendor/github.com/containerd/containerd/task_opts_unix.go @@ -77,3 +77,29 @@ func WithNoPivotRoot(_ context.Context, _ *Client, ti *TaskInfo) error { } return nil } + +// WithShimCgroup sets the existing cgroup for the shim +func WithShimCgroup(path string) NewTaskOpts { + return func(ctx context.Context, c *Client, ti *TaskInfo) error { + if CheckRuntime(ti.Runtime(), "io.containerd.runc") { + if ti.Options == nil { + ti.Options = &options.Options{} + } + opts, ok := ti.Options.(*options.Options) + if !ok { + return errors.New("invalid v2 shim create options format") + } + opts.ShimCgroup = path + } else { + if ti.Options == nil { + ti.Options = &runctypes.CreateOptions{} + } + opts, ok := ti.Options.(*runctypes.CreateOptions) + if !ok { + return errors.New("could not cast TaskInfo Options to CreateOptions") + } + opts.ShimCgroup = path + } + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 46df00001cbdf..edb2f4ea37283 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -1,6 +1,6 @@ -github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/containerd/go-runc 9007c2405372fe28918845901a3276c0915689a1 github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f -github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 +github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9 github.com/containerd/typeurl 
a93fcdb778cd272c6e9b3028b2f42d813e785d40 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877 @@ -20,32 +20,32 @@ github.com/gogo/protobuf v1.2.1 github.com/gogo/googleapis v1.2.0 github.com/golang/protobuf v1.2.0 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db -github.com/opencontainers/runc v1.0.0-rc8 +github.com/opencontainers/runc f4982d86f7fde0b6f953cc62ccc4022c519a10a9 # v1.0.0-rc8-32-gf4982d86 github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/sirupsen/logrus v1.4.1 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c -golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac -google.golang.org/grpc v1.12.0 +golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 +google.golang.org/grpc 25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1 github.com/pkg/errors v0.8.1 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 -golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba https://github.com/golang/sys +golang.org/x/sys 4c4f7f33c9ed00de01c4c741d2177abfcfe19307 https://github.com/golang/sys github.com/opencontainers/image-spec v1.0.1 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e github.com/BurntSushi/toml v0.3.1 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 -github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac +github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/containerd/ttrpc 699c4e40d1e7416e08bf7019c7ce2e9beced4636 +github.com/containerd/ttrpc 1fb3814edf44a76e0ccf503decf726d994919a9a github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 
-go.etcd.io/bbolt v1.3.2 +go.etcd.io/bbolt 2eb7227adea1d5cf85f0bc2a82b7059b13c2fa68 # cri dependencies -github.com/containerd/cri 2fc62db8146ce66f27b37306ad5fda34207835f3 # master -github.com/containerd/go-cni 891c2a41e18144b2d7921f971d6c9789a68046b2 +github.com/containerd/cri b213648c5bd0a1d2ee42709c10dff63fbfee3ad7 # master +github.com/containerd/go-cni 22460c018b64cf8bf4151b3ff9c4d077e6a88cbf github.com/containernetworking/cni v0.6.0 github.com/containernetworking/plugins v0.7.0 github.com/davecgh/go-spew v1.1.0 @@ -60,19 +60,20 @@ github.com/json-iterator/go 1.1.5 github.com/modern-go/reflect2 1.0.1 github.com/modern-go/concurrent 1.0.3 github.com/opencontainers/selinux v1.2.2 -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +github.com/seccomp/libseccomp-golang v0.9.1 github.com/tchap/go-patricia v2.2.6 golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3 golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 gopkg.in/yaml.v2 v2.2.1 -k8s.io/api kubernetes-1.15.0-alpha.0 -k8s.io/apimachinery kubernetes-1.15.0-alpha.0 -k8s.io/apiserver kubernetes-1.15.0-alpha.0 -k8s.io/client-go kubernetes-1.15.0-alpha.0 -k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f -k8s.io/kubernetes v1.15.0-alpha.0 +k8s.io/api kubernetes-1.15.0 +k8s.io/apimachinery kubernetes-1.15.0 +k8s.io/apiserver kubernetes-1.15.0 +k8s.io/cri-api kubernetes-1.15.0 +k8s.io/client-go kubernetes-1.15.0 +k8s.io/klog v0.3.1 +k8s.io/kubernetes v1.15.0 k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c sigs.k8s.io/yaml v1.1.0 @@ -83,3 +84,8 @@ github.com/google/uuid v1.1.1 # aufs dependencies github.com/containerd/aufs f894a800659b6e11c1a13084abd1712f346e349c + +# image encryption dependencies +gopkg.in/square/go-jose.v2 8254d6c783765f38c8675fae4427a1fe73fbd09d https://github.com/square/go-jose.git +github.com/fullsailor/pkcs7 
8306686428a5fe132eac8cb7c4848af725098bd4 +github.com/miscreant/miscreant-go 325cbd69228b6af571a635f7502586a920a2749a https://github.com/miscreant/miscreant.go diff --git a/vendor/github.com/containerd/continuity/context.go b/vendor/github.com/containerd/continuity/context.go deleted file mode 100644 index 75c98594ac68f..0000000000000 --- a/vendor/github.com/containerd/continuity/context.go +++ /dev/null @@ -1,673 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/containerd/continuity/devices" - driverpkg "github.com/containerd/continuity/driver" - "github.com/containerd/continuity/pathdriver" - - "github.com/opencontainers/go-digest" -) - -var ( - // ErrNotFound represents the resource not found - ErrNotFound = fmt.Errorf("not found") - // ErrNotSupported represents the resource not supported - ErrNotSupported = fmt.Errorf("not supported") -) - -// Context represents a file system context for accessing resources. The -// responsibility of the context is to convert system specific resources to -// generic Resource objects. Most of this is safe path manipulation, as well -// as extraction of resource details. 
-type Context interface { - Apply(Resource) error - Verify(Resource) error - Resource(string, os.FileInfo) (Resource, error) - Walk(filepath.WalkFunc) error -} - -// SymlinkPath is intended to give the symlink target value -// in a root context. Target and linkname are absolute paths -// not under the given root. -type SymlinkPath func(root, linkname, target string) (string, error) - -// ContextOptions represents options to create a new context. -type ContextOptions struct { - Digester Digester - Driver driverpkg.Driver - PathDriver pathdriver.PathDriver - Provider ContentProvider -} - -// context represents a file system context for accessing resources. -// Generally, all path qualified access and system considerations should land -// here. -type context struct { - driver driverpkg.Driver - pathDriver pathdriver.PathDriver - root string - digester Digester - provider ContentProvider -} - -// NewContext returns a Context associated with root. The default driver will -// be used, as returned by NewDriver. -func NewContext(root string) (Context, error) { - return NewContextWithOptions(root, ContextOptions{}) -} - -// NewContextWithOptions returns a Context associate with the root. -func NewContextWithOptions(root string, options ContextOptions) (Context, error) { - // normalize to absolute path - pathDriver := options.PathDriver - if pathDriver == nil { - pathDriver = pathdriver.LocalPathDriver - } - - root = pathDriver.FromSlash(root) - root, err := pathDriver.Abs(pathDriver.Clean(root)) - if err != nil { - return nil, err - } - - driver := options.Driver - if driver == nil { - driver, err = driverpkg.NewSystemDriver() - if err != nil { - return nil, err - } - } - - digester := options.Digester - if digester == nil { - digester = simpleDigester{digest.Canonical} - } - - // Check the root directory. Need to be a little careful here. We are - // allowing a link for now, but this may have odd behavior when - // canonicalizing paths. 
As long as all files are opened through the link - // path, this should be okay. - fi, err := driver.Stat(root) - if err != nil { - return nil, err - } - - if !fi.IsDir() { - return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid} - } - - return &context{ - root: root, - driver: driver, - pathDriver: pathDriver, - digester: digester, - provider: options.Provider, - }, nil -} - -// Resource returns the resource as path p, populating the entry with info -// from fi. The path p should be the path of the resource in the context, -// typically obtained through Walk or from the value of Resource.Path(). If fi -// is nil, it will be resolved. -func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) { - fp, err := c.fullpath(p) - if err != nil { - return nil, err - } - - if fi == nil { - fi, err = c.driver.Lstat(fp) - if err != nil { - return nil, err - } - } - - base, err := newBaseResource(p, fi) - if err != nil { - return nil, err - } - - base.xattrs, err = c.resolveXAttrs(fp, fi, base) - if err == ErrNotSupported { - log.Printf("resolving xattrs on %s not supported", fp) - } else if err != nil { - return nil, err - } - - // TODO(stevvooe): Handle windows alternate data streams. - - if fi.Mode().IsRegular() { - dgst, err := c.digest(p) - if err != nil { - return nil, err - } - - return newRegularFile(*base, base.paths, fi.Size(), dgst) - } - - if fi.Mode().IsDir() { - return newDirectory(*base) - } - - if fi.Mode()&os.ModeSymlink != 0 { - // We handle relative links vs absolute links by including a - // beginning slash for absolute links. Effectively, the bundle's - // root is treated as the absolute link anchor. 
- target, err := c.driver.Readlink(fp) - if err != nil { - return nil, err - } - - return newSymLink(*base, target) - } - - if fi.Mode()&os.ModeNamedPipe != 0 { - return newNamedPipe(*base, base.paths) - } - - if fi.Mode()&os.ModeDevice != 0 { - deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver) - if !ok { - log.Printf("device extraction not supported %s", fp) - return nil, ErrNotSupported - } - - // character and block devices merely need to recover the - // major/minor device number. - major, minor, err := deviceDriver.DeviceInfo(fi) - if err != nil { - return nil, err - } - - return newDevice(*base, base.paths, major, minor) - } - - log.Printf("%q (%v) is not supported", fp, fi.Mode()) - return nil, ErrNotFound -} - -func (c *context) verifyMetadata(resource, target Resource) error { - if target.Mode() != resource.Mode() { - return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode()) - } - - if target.UID() != resource.UID() { - return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.GID()) - } - - if target.GID() != resource.GID() { - return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), target.GID()) - } - - if xattrer, ok := resource.(XAttrer); ok { - txattrer, tok := target.(XAttrer) - if !tok { - return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path()) - } - - // For xattrs, only ensure that we have those defined in the resource - // and their values match. We can ignore other xattrs. In other words, - // we only verify that target has the subset defined by resource. 
- txattrs := txattrer.XAttrs() - for attr, value := range xattrer.XAttrs() { - tvalue, ok := txattrs[attr] - if !ok { - return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr) - } - - if !bytes.Equal(value, tvalue) { - return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path()) - } - } - } - - switch r := resource.(type) { - case RegularFile: - // TODO(stevvooe): Another reason to use a record-based approach. We - // have to do another type switch to get this to work. This could be - // fixed with an Equal function, but let's study this a little more to - // be sure. - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - if t.Size() != r.Size() { - return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size()) - } - case Directory: - t, ok := target.(Directory) - if !ok { - return fmt.Errorf("resource %q target not a directory", t.Path()) - } - case SymLink: - t, ok := target.(SymLink) - if !ok { - return fmt.Errorf("resource %q target not a symlink", t.Path()) - } - - if t.Target() != r.Target() { - return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target()) - } - case Device: - t, ok := target.(Device) - if !ok { - return fmt.Errorf("resource %q is not a device", t.Path()) - } - - if t.Major() != r.Major() || t.Minor() != r.Minor() { - return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor()) - } - case NamedPipe: - t, ok := target.(NamedPipe) - if !ok { - return fmt.Errorf("resource %q is not a named pipe", t.Path()) - } - default: - return fmt.Errorf("cannot verify resource: %v", resource) - } - - return nil -} - -// Verify the resource in the context. An error will be returned a discrepancy -// is found. 
-func (c *context) Verify(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - fi, err := c.driver.Lstat(fp) - if err != nil { - return err - } - - target, err := c.Resource(resource.Path(), fi) - if err != nil { - return err - } - - if target.Path() != resource.Path() { - return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path()) - } - - if err := c.verifyMetadata(resource, target); err != nil { - return err - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - hardlinkKey, err := newHardlinkKey(fi) - if err == errNotAHardLink { - if len(h.Paths()) > 1 { - return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path()) - } - } else if err != nil { - return err - } - - for _, path := range h.Paths()[1:] { - fpLink, err := c.fullpath(path) - if err != nil { - return err - } - - fiLink, err := c.driver.Lstat(fpLink) - if err != nil { - return err - } - - targetLink, err := c.Resource(path, fiLink) - if err != nil { - return err - } - - hardlinkKeyLink, err := newHardlinkKey(fiLink) - if err != nil { - return err - } - - if hardlinkKeyLink != hardlinkKey { - return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path()) - } - - if err := c.verifyMetadata(resource, targetLink); err != nil { - return err - } - } - } - - switch r := resource.(type) { - case RegularFile: - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - // TODO(stevvooe): This may need to get a little more sophisticated - // for digest comparison. We may want to actually calculate the - // provided digests, rather than the implementations having an - // overlap. 
- if !digestsMatch(t.Digests(), r.Digests()) { - return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests()) - } - } - - return nil -} - -func (c *context) checkoutFile(fp string, rf RegularFile) error { - if c.provider == nil { - return fmt.Errorf("no file provider") - } - var ( - r io.ReadCloser - err error - ) - for _, dgst := range rf.Digests() { - r, err = c.provider.Reader(dgst) - if err == nil { - break - } - } - if err != nil { - return fmt.Errorf("file content could not be provided: %v", err) - } - defer r.Close() - - return atomicWriteFile(fp, r, rf.Size(), rf.Mode()) -} - -// Apply the resource to the contexts. An error will be returned if the -// operation fails. Depending on the resource type, the resource may be -// created. For resource that cannot be resolved, an error will be returned. -func (c *context) Apply(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - if !strings.HasPrefix(fp, c.root) { - return fmt.Errorf("resource %v escapes root", resource) - } - - var chmod = true - fi, err := c.driver.Lstat(fp) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - switch r := resource.(type) { - case RegularFile: - if fi == nil { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - chmod = false - } else { - if !fi.Mode().IsRegular() { - return fmt.Errorf("file %q should be a regular file, but is not", resource.Path()) - } - if fi.Size() != r.Size() { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - } else { - for _, dgst := range r.Digests() { - f, err := os.Open(fp) - if err != nil { - return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err) - } - compared, err := dgst.Algorithm().FromReader(f) - if err == nil && dgst != compared { - if err := 
c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - break - } - if err1 := f.Close(); err == nil { - err = err1 - } - if err != nil { - return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err) - } - } - } - } - case Directory: - if fi == nil { - if err := c.driver.Mkdir(fp, resource.Mode()); err != nil { - return err - } - } else if !fi.Mode().IsDir() { - return fmt.Errorf("%q should be a directory, but is not", resource.Path()) - } - - case SymLink: - var target string // only possibly set if target resource is a symlink - - if fi != nil { - if fi.Mode()&os.ModeSymlink != 0 { - target, err = c.driver.Readlink(fp) - if err != nil { - return err - } - } - } - - if target != r.Target() { - if fi != nil { - if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory? - return err - } - } - - if err := c.driver.Symlink(r.Target(), fp); err != nil { - return err - } - } - - case Device: - if fi == nil { - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } else if (fi.Mode() & os.ModeDevice) == 0 { - return fmt.Errorf("%q should be a device, but is not", resource.Path()) - } else { - major, minor, err := devices.DeviceInfo(fi) - if err != nil { - return err - } - if major != r.Major() || minor != r.Minor() { - if err := c.driver.Remove(fp); err != nil { - return err - } - - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } - } - - case NamedPipe: - if fi == nil { - if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil { - return err - } - } else if (fi.Mode() & os.ModeNamedPipe) == 0 { - return fmt.Errorf("%q should be a named pipe, but is not", resource.Path()) - } - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - for _, path := range h.Paths() { - if path == resource.Path() { - continue - } - - lp, err := 
c.fullpath(path) - if err != nil { - return err - } - - if _, fi := c.driver.Lstat(lp); fi == nil { - c.driver.Remove(lp) - } - if err := c.driver.Link(fp, lp); err != nil { - return err - } - } - } - - // Update filemode if file was not created - if chmod { - if err := c.driver.Lchmod(fp, resource.Mode()); err != nil { - return err - } - } - - if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil { - return err - } - - if xattrer, ok := resource.(XAttrer); ok { - // For xattrs, only ensure that we have those defined in the resource - // and their values are set. We can ignore other xattrs. In other words, - // we only set xattres defined by resource but never remove. - - if _, ok := resource.(SymLink); ok { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path()) - } - if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } else { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - return fmt.Errorf("unsupported xattr for resource %q", resource.Path()) - } - if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } - } - - return nil -} - -// Walk provides a convenience function to call filepath.Walk correctly for -// the context. Otherwise identical to filepath.Walk, the path argument is -// corrected to be contained within the context. -func (c *context) Walk(fn filepath.WalkFunc) error { - root := c.root - fi, err := c.driver.Lstat(c.root) - if err == nil && fi.Mode()&os.ModeSymlink != 0 { - root, err = c.driver.Readlink(c.root) - if err != nil { - return err - } - } - return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, err error) error { - contained, err := c.containWithRoot(p, root) - return fn(contained, fi, err) - }) -} - -// fullpath returns the system path for the resource, joined with the context -// root. The path p must be a part of the context. 
-func (c *context) fullpath(p string) (string, error) { - p = c.pathDriver.Join(c.root, p) - if !strings.HasPrefix(p, c.root) { - return "", fmt.Errorf("invalid context path") - } - - return p, nil -} - -// contain cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the context root. -func (c *context) contain(p string) (string, error) { - return c.containWithRoot(p, c.root) -} - -// containWithRoot cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the passed root. Extra care should be used when calling this -// instead of contain. This is needed for Walk, as if context root is a symlink, -// it must be evaluated prior to the Walk -func (c *context) containWithRoot(p string, root string) (string, error) { - sanitized, err := c.pathDriver.Rel(root, p) - if err != nil { - return "", err - } - - // ZOMBIES(stevvooe): In certain cases, we may want to remap these to a - // "containment error", so the caller can decide what to do. - return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil -} - -// digest returns the digest of the file at path p, relative to the root. -func (c *context) digest(p string) (digest.Digest, error) { - f, err := c.driver.Open(c.pathDriver.Join(c.root, p)) - if err != nil { - return "", err - } - defer f.Close() - - return c.digester.Digest(f) -} - -// resolveXAttrs attempts to resolve the extended attributes for the resource -// at the path fp, which is the full path to the resource. If the resource -// cannot have xattrs, nil will be returned. 
-func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) { - if fi.Mode().IsRegular() || fi.Mode().IsDir() { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - log.Println("xattr extraction not supported") - return nil, ErrNotSupported - } - - return xattrDriver.Getxattr(fp) - } - - if fi.Mode()&os.ModeSymlink != 0 { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - log.Println("xattr extraction for symlinks not supported") - return nil, ErrNotSupported - } - - return lxattrDriver.LGetxattr(fp) - } - - return nil, nil -} diff --git a/vendor/github.com/containerd/continuity/digests.go b/vendor/github.com/containerd/continuity/digests.go deleted file mode 100644 index bf92275dbd366..0000000000000 --- a/vendor/github.com/containerd/continuity/digests.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package continuity - -import ( - "fmt" - "io" - "sort" - - "github.com/opencontainers/go-digest" -) - -// Digester produces a digest for a given read stream -type Digester interface { - Digest(io.Reader) (digest.Digest, error) -} - -// ContentProvider produces a read stream for a given digest -type ContentProvider interface { - Reader(digest.Digest) (io.ReadCloser, error) -} - -type simpleDigester struct { - algorithm digest.Algorithm -} - -func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) { - digester := sd.algorithm.Digester() - - if _, err := io.Copy(digester.Hash(), r); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// uniqifyDigests sorts and uniqifies the provided digest, ensuring that the -// digests are not repeated and no two digests with the same algorithm have -// different values. Because a stable sort is used, this has the effect of -// "zipping" digest collections from multiple resources. -func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) { - sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here. - seen := map[digest.Digest]struct{}{} - algs := map[digest.Algorithm][]digest.Digest{} // detect different digests. - - var out []digest.Digest - // uniqify the digests - for _, d := range digests { - if _, ok := seen[d]; ok { - continue - } - - seen[d] = struct{}{} - algs[d.Algorithm()] = append(algs[d.Algorithm()], d) - - if len(algs[d.Algorithm()]) > 1 { - return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm()) - } - - out = append(out, d) - } - - return out, nil -} - -// digestsMatch compares the two sets of digests to see if they match. -func digestsMatch(as, bs []digest.Digest) bool { - all := append(as, bs...) - - uniqified, err := uniqifyDigests(all...) - if err != nil { - // the only error uniqifyDigests returns is when the digests disagree. 
- return false - } - - disjoint := len(as) + len(bs) - if len(uniqified) == disjoint { - // if these two sets have the same cardinality, we know both sides - // didn't share any digests. - return false - } - - return true -} - -type digestSlice []digest.Digest - -func (p digestSlice) Len() int { return len(p) } -func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/containerd/continuity/groups_unix.go b/vendor/github.com/containerd/continuity/groups_unix.go deleted file mode 100644 index 022d8ab783911..0000000000000 --- a/vendor/github.com/containerd/continuity/groups_unix.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// TODO(stevvooe): This needs a lot of work before we can call it useful. 
- -type groupIndex struct { - byName map[string]*group - byGID map[int]*group -} - -func getGroupIndex() (*groupIndex, error) { - f, err := os.Open("/etc/group") - if err != nil { - return nil, err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return nil, err - } - - return newGroupIndex(groups), nil -} - -func newGroupIndex(groups []group) *groupIndex { - gi := &groupIndex{ - byName: make(map[string]*group), - byGID: make(map[int]*group), - } - - for i, group := range groups { - gi.byGID[group.gid] = &groups[i] - gi.byName[group.name] = &groups[i] - } - - return gi -} - -type group struct { - name string - gid int - members []string -} - -func getGroupName(gid int) (string, error) { - f, err := os.Open("/etc/group") - if err != nil { - return "", err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return "", err - } - - for _, group := range groups { - if group.gid == gid { - return group.name, nil - } - } - - return "", fmt.Errorf("no group for gid") -} - -// parseGroups parses an /etc/group file for group names, ids and membership. -// This is unix specific. 
-func parseGroups(rd io.Reader) ([]group, error) { - var groups []group - scanner := bufio.NewScanner(rd) - - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "#") { - continue // skip comment - } - - parts := strings.SplitN(scanner.Text(), ":", 4) - - if len(parts) != 4 { - return nil, fmt.Errorf("bad entry: %q", scanner.Text()) - } - - name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3] - - gid, err := strconv.Atoi(sgid) - if err != nil { - return nil, fmt.Errorf("bad gid: %q", gid) - } - - members := strings.Split(smembers, ",") - - groups = append(groups, group{ - name: name, - gid: gid, - members: members, - }) - } - - if scanner.Err() != nil { - return nil, scanner.Err() - } - - return groups, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks.go b/vendor/github.com/containerd/continuity/hardlinks.go deleted file mode 100644 index d493dd7776bc7..0000000000000 --- a/vendor/github.com/containerd/continuity/hardlinks.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" -) - -var ( - errNotAHardLink = fmt.Errorf("invalid hardlink") -) - -type hardlinkManager struct { - hardlinks map[hardlinkKey][]Resource -} - -func newHardlinkManager() *hardlinkManager { - return &hardlinkManager{ - hardlinks: map[hardlinkKey][]Resource{}, - } -} - -// Add attempts to add the resource to the hardlink manager. 
If the resource -// cannot be considered as a hardlink candidate, errNotAHardLink is returned. -func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error { - if _, ok := resource.(Hardlinkable); !ok { - return errNotAHardLink - } - - key, err := newHardlinkKey(fi) - if err != nil { - return err - } - - hlm.hardlinks[key] = append(hlm.hardlinks[key], resource) - - return nil -} - -// Merge processes the current state of the hardlink manager and merges any -// shared nodes into hardlinked resources. -func (hlm *hardlinkManager) Merge() ([]Resource, error) { - var resources []Resource - for key, linked := range hlm.hardlinks { - if len(linked) < 1 { - return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key) - } - - merged, err := Merge(linked...) - if err != nil { - return nil, fmt.Errorf("error merging hardlink: %v", err) - } - - resources = append(resources, merged) - } - - return resources, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_unix.go b/vendor/github.com/containerd/continuity/hardlinks_unix.go deleted file mode 100644 index a15d1759ee6ce..0000000000000 --- a/vendor/github.com/containerd/continuity/hardlinks_unix.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// hardlinkKey provides a tuple-key for managing hardlinks. 
This is system- -// specific. -type hardlinkKey struct { - dev uint64 - inode uint64 -} - -// newHardlinkKey returns a hardlink key for the provided file info. If the -// resource does not represent a possible hardlink, errNotAHardLink will be -// returned. -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo") - } - - if sys.Nlink < 2 { - // NOTE(stevvooe): This is not always true for all filesystems. We - // should somehow detect this and provided a slow "polyfill" that - // leverages os.SameFile if we detect a filesystem where link counts - // is not really supported. - return hardlinkKey{}, errNotAHardLink - } - - return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil -} diff --git a/vendor/github.com/containerd/continuity/ioutils.go b/vendor/github.com/containerd/continuity/ioutils.go deleted file mode 100644 index 503640ebfc86b..0000000000000 --- a/vendor/github.com/containerd/continuity/ioutils.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// AtomicWriteFile atomically writes data to a file by first writing to a -// temp file and calling rename. 
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - buf := bytes.NewBuffer(data) - return atomicWriteFile(filename, buf, int64(len(data)), perm) -} - -// atomicWriteFile writes data to a file by first writing to a temp -// file and calling rename. -func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return err - } - err = os.Chmod(f.Name(), perm) - if err != nil { - f.Close() - return err - } - n, err := io.Copy(f, r) - if err == nil && n < dataSize { - f.Close() - return io.ErrShortWrite - } - if err != nil { - f.Close() - return err - } - if err := f.Sync(); err != nil { - f.Close() - return err - } - if err := f.Close(); err != nil { - return err - } - return os.Rename(f.Name(), filename) -} diff --git a/vendor/github.com/containerd/continuity/manifest.go b/vendor/github.com/containerd/continuity/manifest.go deleted file mode 100644 index 8074bbfbb1e22..0000000000000 --- a/vendor/github.com/containerd/continuity/manifest.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "io" - "log" - "os" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/golang/protobuf/proto" -) - -// Manifest provides the contents of a manifest. 
Users of this struct should -// not typically modify any fields directly. -type Manifest struct { - // Resources specifies all the resources for a manifest in order by path. - Resources []Resource -} - -func Unmarshal(p []byte) (*Manifest, error) { - var bm pb.Manifest - - if err := proto.Unmarshal(p, &bm); err != nil { - return nil, err - } - - var m Manifest - for _, b := range bm.Resource { - r, err := fromProto(b) - if err != nil { - return nil, err - } - - m.Resources = append(m.Resources, r) - } - - return &m, nil -} - -func Marshal(m *Manifest) ([]byte, error) { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.Marshal(&bm) -} - -func MarshalText(w io.Writer, m *Manifest) error { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.MarshalText(w, &bm) -} - -// BuildManifest creates the manifest for the given context -func BuildManifest(ctx Context) (*Manifest, error) { - resourcesByPath := map[string]Resource{} - hardlinks := newHardlinkManager() - - if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error walking %s: %v", p, err) - } - - if p == string(os.PathSeparator) { - // skip root - return nil - } - - resource, err := ctx.Resource(p, fi) - if err != nil { - if err == ErrNotFound { - return nil - } - log.Printf("error getting resource %q: %v", p, err) - return err - } - - // add to the hardlink manager - if err := hardlinks.Add(fi, resource); err == nil { - // Resource has been accepted by hardlink manager so we don't add - // it to the resourcesByPath until we merge at the end. - return nil - } else if err != errNotAHardLink { - // handle any other case where we have a proper error. 
- return fmt.Errorf("adding hardlink %s: %v", p, err) - } - - resourcesByPath[p] = resource - - return nil - }); err != nil { - return nil, err - } - - // merge and post-process the hardlinks. - hardlinked, err := hardlinks.Merge() - if err != nil { - return nil, err - } - - for _, resource := range hardlinked { - resourcesByPath[resource.Path()] = resource - } - - var resources []Resource - for _, resource := range resourcesByPath { - resources = append(resources, resource) - } - - sort.Stable(ByPath(resources)) - - return &Manifest{ - Resources: resources, - }, nil -} - -// VerifyManifest verifies all the resources in a manifest -// against files from the given context. -func VerifyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Verify(resource); err != nil { - return err - } - } - - return nil -} - -// ApplyManifest applies on the resources in a manifest to -// the given context. -func ApplyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Apply(resource); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containerd/continuity/proto/manifest.pb.go b/vendor/github.com/containerd/continuity/proto/manifest.pb.go deleted file mode 100644 index 24317766257ad..0000000000000 --- a/vendor/github.com/containerd/continuity/proto/manifest.pb.go +++ /dev/null @@ -1,181 +0,0 @@ -// Code generated by protoc-gen-go. -// source: manifest.proto -// DO NOT EDIT! - -/* -Package proto is a generated protocol buffer package. - -It is generated from these files: - manifest.proto - -It has these top-level messages: - Manifest - Resource - XAttr - ADSEntry -*/ -package proto - -import proto1 "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto1.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package - -// Manifest specifies the entries in a container bundle, keyed and sorted by -// path. -type Manifest struct { - Resource []*Resource `protobuf:"bytes,1,rep,name=resource" json:"resource,omitempty"` -} - -func (m *Manifest) Reset() { *m = Manifest{} } -func (m *Manifest) String() string { return proto1.CompactTextString(m) } -func (*Manifest) ProtoMessage() {} -func (*Manifest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Manifest) GetResource() []*Resource { - if m != nil { - return m.Resource - } - return nil -} - -type Resource struct { - // Path specifies the path from the bundle root. If more than one - // path is present, the entry may represent a hardlink, rather than using - // a link target. The path format is operating system specific. - Path []string `protobuf:"bytes,1,rep,name=path" json:"path,omitempty"` - // Uid specifies the user id for the resource. - Uid int64 `protobuf:"varint,2,opt,name=uid" json:"uid,omitempty"` - // Gid specifies the group id for the resource. - Gid int64 `protobuf:"varint,3,opt,name=gid" json:"gid,omitempty"` - // user and group are not currently used but their field numbers have been - // reserved for future use. As such, they are marked as deprecated. - User string `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"` - Group string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"` - // Mode defines the file mode and permissions. 
We've used the same - // bit-packing from Go's os package, - // http://golang.org/pkg/os/#FileMode, since they've done the work of - // creating a cross-platform layout. - Mode uint32 `protobuf:"varint,6,opt,name=mode" json:"mode,omitempty"` - // Size specifies the size in bytes of the resource. This is only valid - // for regular files. - Size uint64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"` - // Digest specifies the content digest of the target file. Only valid for - // regular files. The strings are formatted in OCI style, i.e. :. - // For detailed information about the format, please refer to OCI Image Spec: - // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification - // The digests are sorted in lexical order and implementations may choose - // which algorithms they prefer. - Digest []string `protobuf:"bytes,8,rep,name=digest" json:"digest,omitempty"` - // Target defines the target of a hard or soft link. Absolute links start - // with a slash and specify the resource relative to the bundle root. - // Relative links do not start with a slash and are relative to the - // resource path. - Target string `protobuf:"bytes,9,opt,name=target" json:"target,omitempty"` - // Major specifies the major device number for character and block devices. - Major uint64 `protobuf:"varint,10,opt,name=major" json:"major,omitempty"` - // Minor specifies the minor device number for character and block devices. - Minor uint64 `protobuf:"varint,11,opt,name=minor" json:"minor,omitempty"` - // Xattr provides storage for extended attributes for the target resource. - Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr" json:"xattr,omitempty"` - // Ads stores one or more alternate data streams for the target resource. 
- Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads" json:"ads,omitempty"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto1.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Resource) GetXattr() []*XAttr { - if m != nil { - return m.Xattr - } - return nil -} - -func (m *Resource) GetAds() []*ADSEntry { - if m != nil { - return m.Ads - } - return nil -} - -// XAttr encodes extended attributes for a resource. -type XAttr struct { - // Name specifies the attribute name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Data specifies the associated data for the attribute. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *XAttr) Reset() { *m = XAttr{} } -func (m *XAttr) String() string { return proto1.CompactTextString(m) } -func (*XAttr) ProtoMessage() {} -func (*XAttr) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -// ADSEntry encodes information for a Windows Alternate Data Stream. -type ADSEntry struct { - // Name specifices the stream name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Data specifies the stream data. - // See also the description about the digest below. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - // Digest is a CAS representation of the stream data. - // - // At least one of data or digest MUST be specified, and either one of them - // SHOULD be specified. - // - // How to access the actual data using the digest is implementation-specific, - // and implementations can choose not to implement digest. - // So, digest SHOULD be used only when the stream data is large. 
- Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"` -} - -func (m *ADSEntry) Reset() { *m = ADSEntry{} } -func (m *ADSEntry) String() string { return proto1.CompactTextString(m) } -func (*ADSEntry) ProtoMessage() {} -func (*ADSEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func init() { - proto1.RegisterType((*Manifest)(nil), "proto.Manifest") - proto1.RegisterType((*Resource)(nil), "proto.Resource") - proto1.RegisterType((*XAttr)(nil), "proto.XAttr") - proto1.RegisterType((*ADSEntry)(nil), "proto.ADSEntry") -} - -func init() { proto1.RegisterFile("manifest.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0x49, 0x93, 0xf4, 0x4d, 0xa7, 0xed, 0xab, 0x2c, 0x52, 0xe6, 0x18, 0x73, 0x0a, 0x08, - 0x15, 0xf4, 0xe0, 0xb9, 0xa2, 0x17, 0xc1, 0xcb, 0x7a, 0xf1, 0xba, 0xba, 0x6b, 0x5c, 0x21, 0xd9, - 0xb0, 0xd9, 0x80, 0xfa, 0xe5, 0xfc, 0x6a, 0x32, 0xb3, 0x69, 0xd1, 0x9b, 0xa7, 0x3c, 0xcf, 0x6f, - 0xfe, 0x64, 0xf6, 0x81, 0xff, 0xad, 0xea, 0xec, 0x8b, 0x19, 0xc2, 0xb6, 0xf7, 0x2e, 0x38, 0x91, - 0xf3, 0xa7, 0xba, 0x82, 0xe2, 0x7e, 0x2a, 0x88, 0x33, 0x28, 0xbc, 0x19, 0xdc, 0xe8, 0x9f, 0x0d, - 0x26, 0x65, 0x5a, 0x2f, 0x2f, 0x8e, 0x62, 0xf3, 0x56, 0x4e, 0x58, 0x1e, 0x1a, 0xaa, 0xaf, 0x19, - 0x14, 0x7b, 0x2c, 0x04, 0x64, 0xbd, 0x0a, 0xaf, 0x3c, 0xb5, 0x90, 0xac, 0xc5, 0x31, 0xa4, 0xa3, - 0xd5, 0x38, 0x2b, 0x93, 0x3a, 0x95, 0x24, 0x89, 0x34, 0x56, 0x63, 0x1a, 0x49, 0x63, 0xb5, 0xd8, - 0x40, 0x36, 0x0e, 0xc6, 0x63, 0x56, 0x26, 0xf5, 0xe2, 0x7a, 0x86, 0x89, 0x64, 0x2f, 0x10, 0xf2, - 0xc6, 0xbb, 0xb1, 0xc7, 0xfc, 0x50, 0x88, 0x80, 0xfe, 0xd4, 0x3a, 0x6d, 0x70, 0x5e, 0x26, 0xf5, - 0x5a, 0xb2, 0x26, 0x36, 0xd8, 0x4f, 0x83, 0xff, 0xca, 0xa4, 0xce, 0x24, 0x6b, 0xb1, 0x81, 0xb9, - 0xb6, 0x8d, 0x19, 0x02, 0x16, 0x7c, 0xd3, 0xe4, 0x88, 0x07, 0xe5, 0x1b, 0x13, 0x70, 0x41, 0xab, 
- 0xe5, 0xe4, 0xc4, 0x09, 0xe4, 0xad, 0x7a, 0x73, 0x1e, 0x81, 0x97, 0x44, 0xc3, 0xd4, 0x76, 0xce, - 0xe3, 0x72, 0xa2, 0x64, 0x44, 0x05, 0xf9, 0xbb, 0x0a, 0xc1, 0xe3, 0x8a, 0x43, 0x5a, 0x4d, 0x21, - 0x3d, 0xee, 0x42, 0xf0, 0x32, 0x96, 0xc4, 0x29, 0xa4, 0x4a, 0x0f, 0xb8, 0xfe, 0x15, 0xe3, 0xee, - 0xe6, 0xe1, 0xb6, 0x0b, 0xfe, 0x43, 0x52, 0xad, 0x3a, 0x87, 0x9c, 0x47, 0xe8, 0xfe, 0x4e, 0xb5, - 0x94, 0x39, 0x5d, 0xc4, 0x9a, 0x98, 0x56, 0x41, 0x71, 0x7c, 0x2b, 0xc9, 0xba, 0xba, 0x83, 0x62, - 0xbf, 0xe1, 0xaf, 0x33, 0x3f, 0x72, 0x48, 0xe3, 0x7b, 0xa3, 0x7b, 0x9a, 0xf3, 0x45, 0x97, 0xdf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xef, 0x27, 0x99, 0xf7, 0x17, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/continuity/proto/manifest.proto b/vendor/github.com/containerd/continuity/proto/manifest.proto deleted file mode 100644 index 66ef80f054ed7..0000000000000 --- a/vendor/github.com/containerd/continuity/proto/manifest.proto +++ /dev/null @@ -1,97 +0,0 @@ -syntax = "proto3"; - -package proto; - -// Manifest specifies the entries in a container bundle, keyed and sorted by -// path. -message Manifest { - repeated Resource resource = 1; -} - -message Resource { - // Path specifies the path from the bundle root. If more than one - // path is present, the entry may represent a hardlink, rather than using - // a link target. The path format is operating system specific. - repeated string path = 1; - - // NOTE(stevvooe): Need to define clear precedence for user/group/uid/gid precedence. - - // Uid specifies the user id for the resource. - int64 uid = 2; - - // Gid specifies the group id for the resource. - int64 gid = 3; - - // user and group are not currently used but their field numbers have been - // reserved for future use. As such, they are marked as deprecated. - string user = 4 [deprecated=true]; // "deprecated" stands for "reserved" here - string group = 5 [deprecated=true]; // "deprecated" stands for "reserved" here - - // Mode defines the file mode and permissions. 
We've used the same - // bit-packing from Go's os package, - // http://golang.org/pkg/os/#FileMode, since they've done the work of - // creating a cross-platform layout. - uint32 mode = 6; - - // NOTE(stevvooe): Beyond here, we start defining type specific fields. - - // Size specifies the size in bytes of the resource. This is only valid - // for regular files. - uint64 size = 7; - - // Digest specifies the content digest of the target file. Only valid for - // regular files. The strings are formatted in OCI style, i.e. :. - // For detailed information about the format, please refer to OCI Image Spec: - // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification - // The digests are sorted in lexical order and implementations may choose - // which algorithms they prefer. - repeated string digest = 8; - - // Target defines the target of a hard or soft link. Absolute links start - // with a slash and specify the resource relative to the bundle root. - // Relative links do not start with a slash and are relative to the - // resource path. - string target = 9; - - // Major specifies the major device number for character and block devices. - uint64 major = 10; - - // Minor specifies the minor device number for character and block devices. - uint64 minor = 11; - - // Xattr provides storage for extended attributes for the target resource. - repeated XAttr xattr = 12; - - // Ads stores one or more alternate data streams for the target resource. - repeated ADSEntry ads = 13; - -} - -// XAttr encodes extended attributes for a resource. -message XAttr { - // Name specifies the attribute name. - string name = 1; - - // Data specifies the associated data for the attribute. - bytes data = 2; -} - -// ADSEntry encodes information for a Windows Alternate Data Stream. -message ADSEntry { - // Name specifices the stream name. - string name = 1; - - // Data specifies the stream data. - // See also the description about the digest below. 
- bytes data = 2; - - // Digest is a CAS representation of the stream data. - // - // At least one of data or digest MUST be specified, and either one of them - // SHOULD be specified. - // - // How to access the actual data using the digest is implementation-specific, - // and implementations can choose not to implement digest. - // So, digest SHOULD be used only when the stream data is large. - string digest = 3; -} diff --git a/vendor/github.com/containerd/continuity/resource.go b/vendor/github.com/containerd/continuity/resource.go deleted file mode 100644 index d2f52bd31a6e5..0000000000000 --- a/vendor/github.com/containerd/continuity/resource.go +++ /dev/null @@ -1,590 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "errors" - "fmt" - "os" - "reflect" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/opencontainers/go-digest" -) - -// TODO(stevvooe): A record based model, somewhat sketched out at the bottom -// of this file, will be more flexible. Another possibly is to tie the package -// interface directly to the protobuf type. This will have efficiency -// advantages at the cost coupling the nasty codegen types to the exported -// interface. - -type Resource interface { - // Path provides the primary resource path relative to the bundle root. 
In - // cases where resources have more than one path, such as with hard links, - // this will return the primary path, which is often just the first entry. - Path() string - - // Mode returns the - Mode() os.FileMode - - UID() int64 - GID() int64 -} - -// ByPath provides the canonical sort order for a set of resources. Use with -// sort.Stable for deterministic sorting. -type ByPath []Resource - -func (bp ByPath) Len() int { return len(bp) } -func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] } -func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() } - -type XAttrer interface { - XAttrs() map[string][]byte -} - -// Hardlinkable is an interface that a resource type satisfies if it can be a -// hardlink target. -type Hardlinkable interface { - // Paths returns all paths of the resource, including the primary path - // returned by Resource.Path. If len(Paths()) > 1, the resource is a hard - // link. - Paths() []string -} - -type RegularFile interface { - Resource - XAttrer - Hardlinkable - - Size() int64 - Digests() []digest.Digest -} - -// Merge two or more Resources into new file. Typically, this should be -// used to merge regular files as hardlinks. If the files are not identical, -// other than Paths and Digests, the merge will fail and an error will be -// returned. -func Merge(fs ...Resource) (Resource, error) { - if len(fs) < 1 { - return nil, fmt.Errorf("please provide a resource to merge") - } - - if len(fs) == 1 { - return fs[0], nil - } - - var paths []string - var digests []digest.Digest - bypath := map[string][]Resource{} - - // The attributes are all compared against the first to make sure they - // agree before adding to the above collections. If any of these don't - // correctly validate, the merge fails. - prototype := fs[0] - xattrs := make(map[string][]byte) - - // initialize xattrs for use below. All files must have same xattrs. 
- if prototypeXAttrer, ok := prototype.(XAttrer); ok { - for attr, value := range prototypeXAttrer.XAttrs() { - xattrs[attr] = value - } - } - - for _, f := range fs { - h, isHardlinkable := f.(Hardlinkable) - if !isHardlinkable { - return nil, errNotAHardLink - } - - if f.Mode() != prototype.Mode() { - return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode()) - } - - if f.UID() != prototype.UID() { - return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID()) - } - - if f.GID() != prototype.GID() { - return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID()) - } - - if xattrer, ok := f.(XAttrer); ok { - fxattrs := xattrer.XAttrs() - if !reflect.DeepEqual(fxattrs, xattrs) { - return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs) - } - } - - for _, p := range h.Paths() { - pfs, ok := bypath[p] - if !ok { - // ensure paths are unique by only appending on a new path. - paths = append(paths, p) - } - - bypath[p] = append(pfs, f) - } - - if regFile, isRegFile := f.(RegularFile); isRegFile { - prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile) - if !prototypeIsRegFile { - return nil, errors.New("prototype is not a regular file") - } - - if regFile.Size() != prototypeRegFile.Size() { - return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size()) - } - - digests = append(digests, regFile.Digests()...) 
- } else if device, isDevice := f.(Device); isDevice { - prototypeDevice, prototypeIsDevice := prototype.(Device) - if !prototypeIsDevice { - return nil, errors.New("prototype is not a device") - } - - if device.Major() != prototypeDevice.Major() { - return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major()) - } - if device.Minor() != prototypeDevice.Minor() { - return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor()) - } - } else if _, isNamedPipe := f.(NamedPipe); isNamedPipe { - _, prototypeIsNamedPipe := prototype.(NamedPipe) - if !prototypeIsNamedPipe { - return nil, errors.New("prototype is not a named pipe") - } - } else { - return nil, errNotAHardLink - } - } - - sort.Stable(sort.StringSlice(paths)) - - // Choose a "canonical" file. Really, it is just the first file to sort - // against. We also effectively select the very first digest as the - // "canonical" one for this file. - first := bypath[paths[0]][0] - - resource := resource{ - paths: paths, - mode: first.Mode(), - uid: first.UID(), - gid: first.GID(), - xattrs: xattrs, - } - - switch typedF := first.(type) { - case RegularFile: - var err error - digests, err = uniqifyDigests(digests...) - if err != nil { - return nil, err - } - - return ®ularFile{ - resource: resource, - size: typedF.Size(), - digests: digests, - }, nil - case Device: - return &device{ - resource: resource, - major: typedF.Major(), - minor: typedF.Minor(), - }, nil - - case NamedPipe: - return &namedPipe{ - resource: resource, - }, nil - - default: - return nil, errNotAHardLink - } -} - -type Directory interface { - Resource - XAttrer - - // Directory is a no-op method to identify directory objects by interface. - Directory() -} - -type SymLink interface { - Resource - - // Target returns the target of the symlink contained in the . 
- Target() string -} - -type NamedPipe interface { - Resource - Hardlinkable - XAttrer - - // Pipe is a no-op method to allow consistent resolution of NamedPipe - // interface. - Pipe() -} - -type Device interface { - Resource - Hardlinkable - XAttrer - - Major() uint64 - Minor() uint64 -} - -type resource struct { - paths []string - mode os.FileMode - uid, gid int64 - xattrs map[string][]byte -} - -var _ Resource = &resource{} - -func (r *resource) Path() string { - if len(r.paths) < 1 { - return "" - } - - return r.paths[0] -} - -func (r *resource) Mode() os.FileMode { - return r.mode -} - -func (r *resource) UID() int64 { - return r.uid -} - -func (r *resource) GID() int64 { - return r.gid -} - -type regularFile struct { - resource - size int64 - digests []digest.Digest -} - -var _ RegularFile = ®ularFile{} - -// newRegularFile returns the RegularFile, using the populated base resource -// and one or more digests of the content. -func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) { - if !base.Mode().IsRegular() { - return nil, fmt.Errorf("not a regular file") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - // make our own copy of digests - ds := make([]digest.Digest, len(dgsts)) - copy(ds, dgsts) - - return ®ularFile{ - resource: base, - size: size, - digests: ds, - }, nil -} - -func (rf *regularFile) Paths() []string { - paths := make([]string, len(rf.paths)) - copy(paths, rf.paths) - return paths -} - -func (rf *regularFile) Size() int64 { - return rf.size -} - -func (rf *regularFile) Digests() []digest.Digest { - digests := make([]digest.Digest, len(rf.digests)) - copy(digests, rf.digests) - return digests -} - -func (rf *regularFile) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(rf.xattrs)) - - for attr, value := range rf.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -type directory struct { - resource -} - -var _ Directory = &directory{} - -func newDirectory(base resource) (Directory, error) { - if !base.Mode().IsDir() { - return nil, fmt.Errorf("not a directory") - } - - return &directory{ - resource: base, - }, nil -} - -func (d *directory) Directory() {} - -func (d *directory) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type symLink struct { - resource - target string -} - -var _ SymLink = &symLink{} - -func newSymLink(base resource, target string) (SymLink, error) { - if base.Mode()&os.ModeSymlink == 0 { - return nil, fmt.Errorf("not a symlink") - } - - return &symLink{ - resource: base, - target: target, - }, nil -} - -func (l *symLink) Target() string { - return l.target -} - -type namedPipe struct { - resource -} - -var _ NamedPipe = &namedPipe{} - -func newNamedPipe(base resource, paths []string) (NamedPipe, error) { - if base.Mode()&os.ModeNamedPipe == 0 { - return nil, fmt.Errorf("not a namedpipe") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &namedPipe{ - resource: base, - }, nil -} - -func (np *namedPipe) Pipe() {} - -func (np *namedPipe) Paths() []string { - paths := make([]string, len(np.paths)) - copy(paths, np.paths) - return paths -} - -func (np *namedPipe) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(np.xattrs)) - - for attr, value := range np.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -type device struct { - resource - major, minor uint64 -} - -var _ Device = &device{} - -func newDevice(base resource, paths []string, major, minor uint64) (Device, error) { - if base.Mode()&os.ModeDevice == 0 { - return nil, fmt.Errorf("not a device") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &device{ - resource: base, - major: major, - minor: minor, - }, nil -} - -func (d *device) Paths() []string { - paths := make([]string, len(d.paths)) - copy(paths, d.paths) - return paths -} - -func (d *device) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -func (d device) Major() uint64 { - return d.major -} - -func (d device) Minor() uint64 { - return d.minor -} - -// toProto converts a resource to a protobuf record. We'd like to push this -// the individual types but we want to keep this all together during -// prototyping. -func toProto(resource Resource) *pb.Resource { - b := &pb.Resource{ - Path: []string{resource.Path()}, - Mode: uint32(resource.Mode()), - Uid: resource.UID(), - Gid: resource.GID(), - } - - if xattrer, ok := resource.(XAttrer); ok { - // Sorts the XAttrs by name for consistent ordering. 
- keys := []string{} - xattrs := xattrer.XAttrs() - for k := range xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]}) - } - } - - switch r := resource.(type) { - case RegularFile: - b.Path = r.Paths() - b.Size = uint64(r.Size()) - - for _, dgst := range r.Digests() { - b.Digest = append(b.Digest, dgst.String()) - } - case SymLink: - b.Target = r.Target() - case Device: - b.Major, b.Minor = r.Major(), r.Minor() - b.Path = r.Paths() - case NamedPipe: - b.Path = r.Paths() - } - - // enforce a few stability guarantees that may not be provided by the - // resource implementation. - sort.Strings(b.Path) - - return b -} - -// fromProto converts from a protobuf Resource to a Resource interface. -func fromProto(b *pb.Resource) (Resource, error) { - base := &resource{ - paths: b.Path, - mode: os.FileMode(b.Mode), - uid: b.Uid, - gid: b.Gid, - } - - base.xattrs = make(map[string][]byte, len(b.Xattr)) - - for _, attr := range b.Xattr { - base.xattrs[attr.Name] = attr.Data - } - - switch { - case base.Mode().IsRegular(): - dgsts := make([]digest.Digest, len(b.Digest)) - for i, dgst := range b.Digest { - // TODO(stevvooe): Should we be validating at this point? - dgsts[i] = digest.Digest(dgst) - } - - return newRegularFile(*base, b.Path, int64(b.Size), dgsts...) - case base.Mode().IsDir(): - return newDirectory(*base) - case base.Mode()&os.ModeSymlink != 0: - return newSymLink(*base, b.Target) - case base.Mode()&os.ModeNamedPipe != 0: - return newNamedPipe(*base, b.Path) - case base.Mode()&os.ModeDevice != 0: - return newDevice(*base, b.Path, b.Major, b.Minor) - } - - return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode()) -} - -// NOTE(stevvooe): An alternative model that supports inline declaration. -// Convenient for unit testing where inline declarations may be desirable but -// creates an awkward API for the standard use case. 
- -// type ResourceKind int - -// const ( -// ResourceRegularFile = iota + 1 -// ResourceDirectory -// ResourceSymLink -// Resource -// ) - -// type Resource struct { -// Kind ResourceKind -// Paths []string -// Mode os.FileMode -// UID string -// GID string -// Size int64 -// Digests []digest.Digest -// Target string -// Major, Minor int -// XAttrs map[string][]byte -// } - -// type RegularFile struct { -// Paths []string -// Size int64 -// Digests []digest.Digest -// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid -// } diff --git a/vendor/github.com/containerd/continuity/resource_unix.go b/vendor/github.com/containerd/continuity/resource_unix.go deleted file mode 100644 index 0e103ccc5c7a0..0000000000000 --- a/vendor/github.com/containerd/continuity/resource_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux darwin freebsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - // TODO(stevvooe): This need to be resolved for the container's root, - // where here we are really getting the host OS's value. We need to allow - // this be passed in and fixed up to make these uid/gid mappings portable. 
- // Either this can be part of the driver or we can achieve it through some - // other mechanism. - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - // TODO(stevvooe): This may not be a hard error for all platforms. We - // may want to move this to the driver. - return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi) - } - - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - - uid: int64(sys.Uid), - gid: int64(sys.Gid), - - // NOTE(stevvooe): Population of shared xattrs field is deferred to - // the resource types that populate it. Since they are a property of - // the context, they must set there. - }, nil -} diff --git a/vendor/github.com/containerd/fifo/raw.go b/vendor/github.com/containerd/fifo/raw.go deleted file mode 100644 index acc303e43759e..0000000000000 --- a/vendor/github.com/containerd/fifo/raw.go +++ /dev/null @@ -1,116 +0,0 @@ -// +build go1.12 - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fifo - -import ( - "syscall" - - "github.com/pkg/errors" -) - -// SyscallConn provides raw access to the fifo's underlying filedescrptor. -// See syscall.Conn for guarentees provided by this interface. 
-func (f *fifo) SyscallConn() (syscall.RawConn, error) { - // deterministic check for closed - select { - case <-f.closed: - return nil, errors.New("fifo closed") - default: - } - - select { - case <-f.closed: - return nil, errors.New("fifo closed") - case <-f.opened: - return f.file.SyscallConn() - default: - } - - // Not opened and not closed, this means open is non-blocking AND it's not open yet - // Use rawConn to deal with non-blocking open. - rc := &rawConn{f: f, ready: make(chan struct{})} - go func() { - select { - case <-f.closed: - return - case <-f.opened: - rc.raw, rc.err = f.file.SyscallConn() - close(rc.ready) - } - }() - - return rc, nil -} - -type rawConn struct { - f *fifo - ready chan struct{} - raw syscall.RawConn - err error -} - -func (r *rawConn) Control(f func(fd uintptr)) error { - select { - case <-r.f.closed: - return errors.New("control of closed fifo") - case <-r.ready: - } - - if r.err != nil { - return r.err - } - - return r.raw.Control(f) -} - -func (r *rawConn) Read(f func(fd uintptr) (done bool)) error { - if r.f.flag&syscall.O_WRONLY > 0 { - return errors.New("reading from write-only fifo") - } - - select { - case <-r.f.closed: - return errors.New("reading of a closed fifo") - case <-r.ready: - } - - if r.err != nil { - return r.err - } - - return r.raw.Read(f) -} - -func (r *rawConn) Write(f func(fd uintptr) (done bool)) error { - if r.f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 { - return errors.New("writing to read-only fifo") - } - - select { - case <-r.f.closed: - return errors.New("writing to a closed fifo") - case <-r.ready: - } - - if r.err != nil { - return r.err - } - - return r.raw.Write(f) -} diff --git a/vendor/github.com/containerd/fifo/readme.md b/vendor/github.com/containerd/fifo/readme.md index 30e233cc69d32..2b41b3b1ca376 100644 --- a/vendor/github.com/containerd/fifo/readme.md +++ b/vendor/github.com/containerd/fifo/readme.md @@ -1,7 +1,6 @@ ### fifo [![Build 
Status](https://travis-ci.org/containerd/fifo.svg?branch=master)](https://travis-ci.org/containerd/fifo) -[![codecov](https://codecov.io/gh/containerd/fifo/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/fifo) Go package for handling fifos in a sane way. @@ -31,14 +30,3 @@ func (f *fifo) Write(b []byte) (int, error) // before open(2) has returned and fifo was never opened. func (f *fifo) Close() error ``` - -## Project details - -The fifo is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go index 96262afab3964..613cc511c6834 100644 --- a/vendor/github.com/containerd/go-runc/runc.go +++ b/vendor/github.com/containerd/go-runc/runc.go @@ -275,7 +275,11 @@ func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) if err != nil { return -1, err } - return Monitor.Wait(cmd, ec) + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + return status, err } type DeleteOpts struct { @@ -570,7 +574,11 @@ func (r *Runc) Restore(context context.Context, id, bundle string, opts *Restore } } } - return Monitor.Wait(cmd, ec) + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + return status, err } // Update updates the current container with the provided resource spec diff --git 
a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go index 22f5496b4b952..aa8c9541cf357 100644 --- a/vendor/github.com/containerd/ttrpc/channel.go +++ b/vendor/github.com/containerd/ttrpc/channel.go @@ -18,7 +18,6 @@ package ttrpc import ( "bufio" - "context" "encoding/binary" "io" "net" @@ -98,7 +97,7 @@ func newChannel(conn net.Conn) *channel { // returned will be valid and caller should send that along to // the correct consumer. The bytes on the underlying channel // will be discarded. -func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) { +func (ch *channel) recv() (messageHeader, []byte, error) { mh, err := readMessageHeader(ch.hrbuf[:], ch.br) if err != nil { return messageHeader{}, nil, err @@ -120,7 +119,7 @@ func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) { return mh, p, nil } -func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error { +func (ch *channel) send(streamID uint32, t messageType, p []byte) error { if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { return err } diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go index 35ca91fbaeee3..9db15fe69e7fc 100644 --- a/vendor/github.com/containerd/ttrpc/client.go +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -36,36 +36,52 @@ import ( // closed. 
var ErrClosed = errors.New("ttrpc: closed") +// Client for a ttrpc server type Client struct { codec codec conn net.Conn channel *channel calls chan *callRequest - closed chan struct{} - closeOnce sync.Once - closeFunc func() - done chan struct{} - err error + ctx context.Context + closed func() + + closeOnce sync.Once + userCloseFunc func() + + errOnce sync.Once + err error + interceptor UnaryClientInterceptor } +// ClientOpts configures a client type ClientOpts func(c *Client) +// WithOnClose sets the close func whenever the client's Close() method is called func WithOnClose(onClose func()) ClientOpts { return func(c *Client) { - c.closeFunc = onClose + c.userCloseFunc = onClose + } +} + +// WithUnaryClientInterceptor sets the provided client interceptor +func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { + return func(c *Client) { + c.interceptor = i } } func NewClient(conn net.Conn, opts ...ClientOpts) *Client { + ctx, cancel := context.WithCancel(context.Background()) c := &Client{ - codec: codec{}, - conn: conn, - channel: newChannel(conn), - calls: make(chan *callRequest), - closed: make(chan struct{}), - done: make(chan struct{}), - closeFunc: func() {}, + codec: codec{}, + conn: conn, + channel: newChannel(conn), + calls: make(chan *callRequest), + closed: cancel, + ctx: ctx, + userCloseFunc: func() {}, + interceptor: defaultClientInterceptor, } for _, o := range opts { @@ -99,11 +115,18 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int cresp = &Response{} ) + if metadata, ok := GetMetadata(ctx); ok { + metadata.setRequest(creq) + } + if dl, ok := ctx.Deadline(); ok { creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds() } - if err := c.dispatch(ctx, creq, cresp); err != nil { + info := &UnaryClientInfo{ + FullMethod: fullPath(service, method), + } + if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil { return err } @@ -131,8 +154,8 @@ func (c *Client) dispatch(ctx context.Context, req 
*Request, resp *Response) err case <-ctx.Done(): return ctx.Err() case c.calls <- call: - case <-c.done: - return c.err + case <-c.ctx.Done(): + return c.error() } select { @@ -140,16 +163,15 @@ func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) err return ctx.Err() case err := <-errs: return filterCloseErr(err) - case <-c.done: - return c.err + case <-c.ctx.Done(): + return c.error() } } func (c *Client) Close() error { c.closeOnce.Do(func() { - close(c.closed) + c.closed() }) - return nil } @@ -159,51 +181,82 @@ type message struct { err error } -func (c *Client) run() { - var ( - streamID uint32 = 1 - waiters = make(map[uint32]*callRequest) - calls = c.calls - incoming = make(chan *message) - shutdown = make(chan struct{}) - shutdownErr error - ) +type receiver struct { + wg *sync.WaitGroup + messages chan *message + err error +} - go func() { - defer close(shutdown) +func (r *receiver) run(ctx context.Context, c *channel) { + defer r.wg.Done() - // start one more goroutine to recv messages without blocking. - for { - mh, p, err := c.channel.recv(context.TODO()) + for { + select { + case <-ctx.Done(): + r.err = ctx.Err() + return + default: + mh, p, err := c.recv() if err != nil { _, ok := status.FromError(err) if !ok { // treat all errors that are not an rpc status as terminal. // all others poison the connection. - shutdownErr = err + r.err = filterCloseErr(err) return } } select { - case incoming <- &message{ + case r.messages <- &message{ messageHeader: mh, p: p[:mh.Length], err: err, }: - case <-c.done: + case <-ctx.Done(): + r.err = ctx.Err() return } } + } +} + +func (c *Client) run() { + var ( + streamID uint32 = 1 + waiters = make(map[uint32]*callRequest) + calls = c.calls + incoming = make(chan *message) + receiversDone = make(chan struct{}) + wg sync.WaitGroup + ) + + // broadcast the shutdown error to the remaining waiters. 
+ abortWaiters := func(wErr error) { + for _, waiter := range waiters { + waiter.errs <- wErr + } + } + recv := &receiver{ + wg: &wg, + messages: incoming, + } + wg.Add(1) + + go func() { + wg.Wait() + close(receiversDone) }() + go recv.run(c.ctx, c.channel) - defer c.conn.Close() - defer close(c.done) - defer c.closeFunc() + defer func() { + c.conn.Close() + c.userCloseFunc() + }() for { select { case call := <-calls: - if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil { + if err := c.send(streamID, messageTypeRequest, call.req); err != nil { call.errs <- err continue } @@ -219,41 +272,42 @@ func (c *Client) run() { call.errs <- c.recv(call.resp, msg) delete(waiters, msg.StreamID) - case <-shutdown: - if shutdownErr != nil { - shutdownErr = filterCloseErr(shutdownErr) - } else { - shutdownErr = ErrClosed - } - - shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down") - - c.err = shutdownErr - for _, waiter := range waiters { - waiter.errs <- shutdownErr + case <-receiversDone: + // all the receivers have exited + if recv.err != nil { + c.setError(recv.err) } + // don't return out, let the close of the context trigger the abort of waiters c.Close() - return - case <-c.closed: - if c.err == nil { - c.err = ErrClosed - } - // broadcast the shutdown error to the remaining waiters. 
- for _, waiter := range waiters { - waiter.errs <- c.err - } + case <-c.ctx.Done(): + abortWaiters(c.error()) return } } } -func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error { +func (c *Client) error() error { + c.errOnce.Do(func() { + if c.err == nil { + c.err = ErrClosed + } + }) + return c.err +} + +func (c *Client) setError(err error) { + c.errOnce.Do(func() { + c.err = err + }) +} + +func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error { p, err := c.codec.Marshal(msg) if err != nil { return err } - return c.channel.send(ctx, streamID, mtype, p) + return c.channel.send(streamID, mtype, p) } func (c *Client) recv(resp *Response, msg *message) error { @@ -274,22 +328,21 @@ func (c *Client) recv(resp *Response, msg *message) error { // // This purposely ignores errors with a wrapped cause. func filterCloseErr(err error) error { - if err == nil { + switch { + case err == nil: return nil - } - - if err == io.EOF { + case err == io.EOF: return ErrClosed - } - - if strings.Contains(err.Error(), "use of closed network connection") { + case errors.Cause(err) == io.EOF: return ErrClosed - } - - // if we have an epipe on a write, we cast to errclosed - if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" { - if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE { - return ErrClosed + case strings.Contains(err.Error(), "use of closed network connection"): + return ErrClosed + default: + // if we have an epipe on a write, we cast to errclosed + if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" { + if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE { + return ErrClosed + } } } diff --git a/vendor/github.com/containerd/ttrpc/config.go b/vendor/github.com/containerd/ttrpc/config.go index 019b7a09dd85d..6a53c112b7b45 100644 --- a/vendor/github.com/containerd/ttrpc/config.go +++ b/vendor/github.com/containerd/ttrpc/config.go @@ -19,9 
+19,11 @@ package ttrpc import "github.com/pkg/errors" type serverConfig struct { - handshaker Handshaker + handshaker Handshaker + interceptor UnaryServerInterceptor } +// ServerOpt for configuring a ttrpc server type ServerOpt func(*serverConfig) error // WithServerHandshaker can be passed to NewServer to ensure that the @@ -37,3 +39,14 @@ func WithServerHandshaker(handshaker Handshaker) ServerOpt { return nil } } + +// WithUnaryServerInterceptor sets the provided interceptor on the server +func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt { + return func(c *serverConfig) error { + if c.interceptor != nil { + return errors.New("only one interceptor allowed per server") + } + c.interceptor = i + return nil + } +} diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go new file mode 100644 index 0000000000000..c1219dac65f6f --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/interceptor.go @@ -0,0 +1,50 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import "context" + +// UnaryServerInfo provides information about the server request +type UnaryServerInfo struct { + FullMethod string +} + +// UnaryClientInfo provides information about the client request +type UnaryClientInfo struct { + FullMethod string +} + +// Unmarshaler contains the server request data and allows it to be unmarshaled +// into a concrete type +type Unmarshaler func(interface{}) error + +// Invoker invokes the client's request and response from the ttrpc server +type Invoker func(context.Context, *Request, *Response) error + +// UnaryServerInterceptor specifies the interceptor function for server request/response +type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error) + +// UnaryClientInterceptor specifies the interceptor function for client request/response +type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error + +func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) { + return method(ctx, unmarshal) +} + +func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { + return invoker(ctx, req, resp) +} diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go new file mode 100644 index 0000000000000..ce8c0d13c41b5 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/metadata.go @@ -0,0 +1,107 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "strings" +) + +// MD is the user type for ttrpc metadata +type MD map[string][]string + +// Get returns the metadata for a given key when they exist. +// If there is no metadata, a nil slice and false are returned. +func (m MD) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + list, ok := m[key] + if !ok || len(list) == 0 { + return nil, false + } + + return list, true +} + +// Set sets the provided values for a given key. +// The values will overwrite any existing values. +// If no values provided, a key will be deleted. +func (m MD) Set(key string, values ...string) { + key = strings.ToLower(key) + if len(values) == 0 { + delete(m, key) + return + } + m[key] = values +} + +// Append appends additional values to the given key. +func (m MD) Append(key string, values ...string) { + key = strings.ToLower(key) + if len(values) == 0 { + return + } + current, ok := m[key] + if ok { + m.Set(key, append(current, values...)...) + } else { + m.Set(key, values...) 
+ } +} + +func (m MD) setRequest(r *Request) { + for k, values := range m { + for _, v := range values { + r.Metadata = append(r.Metadata, &KeyValue{ + Key: k, + Value: v, + }) + } + } +} + +func (m MD) fromRequest(r *Request) { + for _, kv := range r.Metadata { + m[kv.Key] = append(m[kv.Key], kv.Value) + } +} + +type metadataKey struct{} + +// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata) +func GetMetadata(ctx context.Context) (MD, bool) { + metadata, ok := ctx.Value(metadataKey{}).(MD) + return metadata, ok +} + +// GetMetadataValue gets a specific metadata value by name from context.Context +func GetMetadataValue(ctx context.Context, name string) (string, bool) { + metadata, ok := GetMetadata(ctx) + if !ok { + return "", false + } + + if list, ok := metadata.Get(name); ok { + return list[0], true + } + + return "", false +} + +// WithMetadata attaches metadata map to a context.Context +func WithMetadata(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, metadataKey{}, md) +} diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go index 40804eac0d998..1d4f1df653b20 100644 --- a/vendor/github.com/containerd/ttrpc/server.go +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -53,10 +53,13 @@ func NewServer(opts ...ServerOpt) (*Server, error) { return nil, err } } + if config.interceptor == nil { + config.interceptor = defaultServerInterceptor + } return &Server{ config: config, - services: newServiceSet(), + services: newServiceSet(config.interceptor), done: make(chan struct{}), listeners: make(map[net.Listener]struct{}), connections: make(map[*serverConn]struct{}), @@ -341,7 +344,7 @@ func (c *serverConn) run(sctx context.Context) { default: // proceed } - mh, p, err := ch.recv(ctx) + mh, p, err := ch.recv() if err != nil { status, ok := status.FromError(err) if !ok { @@ -438,7 +441,7 @@ func (c *serverConn) run(sctx context.Context) { return 
} - if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil { + if err := ch.send(response.id, messageTypeResponse, p); err != nil { logrus.WithError(err).Error("failed sending message on channel") return } @@ -449,7 +452,12 @@ func (c *serverConn) run(sctx context.Context) { // branch. Basically, it means that we are no longer receiving // requests due to a terminal error. recvErr = nil // connection is now "closing" - if err != nil && err != io.EOF { + if err == io.EOF || err == io.ErrUnexpectedEOF { + // The client went away and we should stop processing + // requests, so that the client connection is closed + return + } + if err != nil { logrus.WithError(err).Error("error receiving message") } case <-shutdown: @@ -461,6 +469,12 @@ func (c *serverConn) run(sctx context.Context) { var noopFunc = func() {} func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) { + if len(req.Metadata) > 0 { + md := MD{} + md.fromRequest(req) + ctx = WithMetadata(ctx, md) + } + cancel = noopFunc if req.TimeoutNano == 0 { return ctx, cancel diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go index fe1cade5ad270..655b2caea3e68 100644 --- a/vendor/github.com/containerd/ttrpc/services.go +++ b/vendor/github.com/containerd/ttrpc/services.go @@ -37,12 +37,14 @@ type ServiceDesc struct { } type serviceSet struct { - services map[string]ServiceDesc + services map[string]ServiceDesc + interceptor UnaryServerInterceptor } -func newServiceSet() *serviceSet { +func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { return &serviceSet{ - services: make(map[string]ServiceDesc), + services: make(map[string]ServiceDesc), + interceptor: interceptor, } } @@ -84,7 +86,11 @@ func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName strin return nil } - resp, err := method(ctx, unmarshal) + info := &UnaryServerInfo{ + FullMethod: fullPath(serviceName, 
methodName), + } + + resp, err := s.interceptor(ctx, unmarshal, info, method) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/ttrpc/types.go b/vendor/github.com/containerd/ttrpc/types.go index a6b3b818e0dcd..9a1c19a7238de 100644 --- a/vendor/github.com/containerd/ttrpc/types.go +++ b/vendor/github.com/containerd/ttrpc/types.go @@ -23,10 +23,11 @@ import ( ) type Request struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3"` - Method string `protobuf:"bytes,2,opt,name=method,proto3"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` - TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` + Service string `protobuf:"bytes,1,opt,name=service,proto3"` + Method string `protobuf:"bytes,2,opt,name=method,proto3"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` + TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` + Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"` } func (r *Request) Reset() { *r = Request{} } @@ -41,3 +42,22 @@ type Response struct { func (r *Response) Reset() { *r = Response{} } func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } func (r *Response) ProtoMessage() {} + +type StringList struct { + List []string `protobuf:"bytes,1,rep,name=list,proto3"` +} + +func (r *StringList) Reset() { *r = StringList{} } +func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) } +func (r *StringList) ProtoMessage() {} + +func makeStringList(item ...string) StringList { return StringList{List: item} } + +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3"` + Value string `protobuf:"bytes,2,opt,name=value,proto3"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (*KeyValue) ProtoMessage() {} +func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) } diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/containerd/typeurl/LICENSE index 
584149b6ee28c..261eeb9e9f8b2 100644 --- a/vendor/github.com/containerd/typeurl/LICENSE +++ b/vendor/github.com/containerd/typeurl/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright The containerd Authors + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md index 67f1b8447f07b..e0787743c5dad 100644 --- a/vendor/github.com/containerd/typeurl/README.md +++ b/vendor/github.com/containerd/typeurl/README.md @@ -7,13 +7,3 @@ A Go package for managing the registration, marshaling, and unmarshaling of encoded types. This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](). - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
-As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go deleted file mode 100644 index c0d0fd2053339..0000000000000 --- a/vendor/github.com/containerd/typeurl/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. 
-// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. -// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. 
diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go index 4f9c069f4d38d..153c488d0aa62 100644 --- a/vendor/github.com/containerd/typeurl/types.go +++ b/vendor/github.com/containerd/typeurl/types.go @@ -78,10 +78,7 @@ func Is(any *types.Any, v interface{}) bool { return any.TypeUrl == url } -// MarshalAny marshals the value v into an any with the correct TypeUrl. -// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. +// MarshalAny marshals the value v into an any with the correct TypeUrl func MarshalAny(v interface{}) (*types.Any, error) { var marshal func(v interface{}) ([]byte, error) switch t := v.(type) { diff --git a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go index ced6b9bf07c9f..1825ce654768a 100644 --- a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go +++ b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go @@ -50,21 +50,46 @@ func (l *local) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, e } func (l *local) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { - var do leases.DeleteOptions - for _, opt := range opts { - if err := opt(ctx, &do); err != nil { - return err - } + if err := l.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).Delete(ctx, lease, opts...) 
+ }); err != nil { + return err + } + + return nil +} + +func (l *local) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + if err := l.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).AddResource(ctx, lease, r) + }); err != nil { + return err } + return nil +} + +func (l *local) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { if err := l.db.Update(func(tx *bolt.Tx) error { - return metadata.NewLeaseManager(tx).Delete(ctx, lease) + return metadata.NewLeaseManager(tx).AddResource(ctx, lease, r) }); err != nil { return err } return nil +} + +func (l *local) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + var res []leases.Resource + if err := l.db.Update(func(tx *bolt.Tx) error { + var err error + res, err = metadata.NewLeaseManager(tx).ListResources(ctx, lease) + return err + }); err != nil { + return nil, err + } + return res, nil } func (l *local) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { From 326c6efd8bc098157652e703777396e81bd986cd Mon Sep 17 00:00:00 2001 From: Sam Whited Date: Thu, 3 Oct 2019 13:54:44 -0400 Subject: [PATCH 73/73] Fix containerdServiceOpt type Signed-off-by: Sam Whited --- daemon/images/service_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/images/service_test.go b/daemon/images/service_test.go index 3b612d308d6a1..4e740cbc0bc93 100644 --- a/daemon/images/service_test.go +++ b/daemon/images/service_test.go @@ -94,7 +94,7 @@ func containerdServiceOpt(ctx context.Context, root string) (containerd.ClientOp // load the plugin specific configuration if it is provided if p.Config != nil { - pluginConfig, err := config.Decode(p.ID, p.Config) + pluginConfig, err := config.Decode(p) if err != nil { return nil, err }