From 90a193c8352f0673e365992b9ea3b86a324e8edb Mon Sep 17 00:00:00 2001 From: Daniel Canter Date: Thu, 1 Jul 2021 06:15:30 -0700 Subject: [PATCH] Add volume mount support for job containers This adds basic directory mount support for job containers. As any path on the host is already accessible from the container, the concept of volume mounts is a bit funny for job containers. However, it still makes sense to treat the volume mount point where the container image is mounted as where most things should be found regarding the container. The manner in which this is done is by appending the container mount path for the volume to where the rootfs volume is mounted on the host and then symlinking it. So: Container rootfs volume path = "C:\C\123456789abcdefgh\" Example #1 -------------- { "host_path": "C:\mydir" "container_path": "\dir\in\container" } "C:\mydir" would be symlinked to "C:\C\123456789abcdefgh\dir\in\container" Example #2 --------------- Drive letters will be stripped { "host_path": "C:\mydir" "container_path": "C:\dir\in\container" } "C:\mydir" would be symlinked to "C:\C\123456789abcdefgh\dir\in\container" Signed-off-by: Daniel Canter --- internal/jobcontainers/jobcontainer.go | 6 +- internal/jobcontainers/mounts.go | 56 ++++++++++ internal/jobcontainers/mounts_test.go | 21 ++++ test/cri-containerd/jobcontainer_test.go | 134 +++++++++++++++++++++-- 4 files changed, 208 insertions(+), 9 deletions(-) create mode 100644 internal/jobcontainers/mounts.go create mode 100644 internal/jobcontainers/mounts_test.go diff --git a/internal/jobcontainers/jobcontainer.go b/internal/jobcontainers/jobcontainer.go index 88fbfb05e8..191b475be9 100644 --- a/internal/jobcontainers/jobcontainer.go +++ b/internal/jobcontainers/jobcontainer.go @@ -139,6 +139,10 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ * layers := layers.NewImageLayers(nil, "", s.Windows.LayerFolders, sandboxPath, false) r.SetLayers(layers) + if err := setupMounts(s, 
container.sandboxMount); err != nil { + return nil, nil, err + } + volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$` if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil { return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, s.Root.Path) @@ -524,7 +528,7 @@ func systemProcessInformation() ([]*winapi.SYSTEM_PROCESS_INFORMATION, error) { var ( systemProcInfo *winapi.SYSTEM_PROCESS_INFORMATION procInfos []*winapi.SYSTEM_PROCESS_INFORMATION - // This happens to be the buffer size hcs uses but there's no really no hard need to keep it + // This happens to be the buffer size hcs uses but there's really no hard need to keep it // the same, it's just a sane default. size = uint32(1024 * 512) bounds uintptr diff --git a/internal/jobcontainers/mounts.go b/internal/jobcontainers/mounts.go new file mode 100644 index 0000000000..187c340de2 --- /dev/null +++ b/internal/jobcontainers/mounts.go @@ -0,0 +1,56 @@ +package jobcontainers + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// isnamedPipePath returns true if the given path is to a named pipe. +func isnamedPipePath(p string) bool { + return strings.HasPrefix(p, `\\.\pipe\`) +} + +// Strip the drive letter (if there is one) so we don't end up with "%CONTAINER_SANDBOX_MOUNT_POINT%"\C:\path\to\mount +func stripDriveLetter(name string) string { + // Remove drive letter + if len(name) == 2 && name[1] == ':' { + name = "." + } else if len(name) > 2 && name[1] == ':' { + name = name[2:] + } + return name +} + +// setupMounts adds the custom mounts requested in the OCI runtime spec. 
Mounts are a bit funny as you already have +// access to everything on the host, so just symlink in whatever was requested to the path where the container volume +// is mounted. At least then the mount can be accessed from a path relative to the default working directory/where the volume +// is. +func setupMounts(spec *specs.Spec, sandboxVolumePath string) error { + for _, mount := range spec.Mounts { + if mount.Destination == "" || mount.Source == "" { + return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount) + } + + if isnamedPipePath(mount.Source) { + return errors.New("named pipe mounts not supported for job containers - interact with the pipe directly") + } + + fullCtrPath := filepath.Join(sandboxVolumePath, stripDriveLetter(mount.Destination)) + // Make sure all of the dirs leading up to the full path exist. + strippedCtrPath := filepath.Dir(fullCtrPath) + if err := os.MkdirAll(strippedCtrPath, 0777); err != nil { + return errors.Wrap(err, "failed to make directory for job container mount") + } + + if err := os.Symlink(mount.Source, fullCtrPath); err != nil { + return errors.Wrap(err, "failed to setup mount for job container") + } + } + + return nil +} diff --git a/internal/jobcontainers/mounts_test.go b/internal/jobcontainers/mounts_test.go new file mode 100644 index 0000000000..22d61dfcd4 --- /dev/null +++ b/internal/jobcontainers/mounts_test.go @@ -0,0 +1,21 @@ +package jobcontainers + +import ( + "testing" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func TestNamedPipeDeny(t *testing.T) { + s := &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/path/in/container", + Source: `\\.\pipe\dummy\path`, + }, + }, + } + if err := setupMounts(s, "/test"); err == nil { + t.Fatal("expected named pipe mount validation to fail for job container") + } +} diff --git a/test/cri-containerd/jobcontainer_test.go b/test/cri-containerd/jobcontainer_test.go index d1d0678dd8..e5bbeec589 100644 --- 
a/test/cri-containerd/jobcontainer_test.go +++ b/test/cri-containerd/jobcontainer_test.go @@ -33,7 +33,7 @@ func getJobContainerPodRequestWCOW(t *testing.T) *runtime.RunPodSandboxRequest { } } -func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string) *runtime.CreateContainerRequest { +func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string, mounts []*runtime.Mount) *runtime.CreateContainerRequest { return &runtime.CreateContainerRequest{ Config: &runtime.ContainerConfig{ Metadata: &runtime.ContainerMetadata{ @@ -49,11 +49,12 @@ func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.P "-t", "127.0.0.1", }, - + Mounts: mounts, Annotations: map[string]string{ "microsoft.com/hostprocess-container": "true", "microsoft.com/hostprocess-inherit-user": "true", }, + Windows: &runtime.WindowsContainerConfig{}, }, PodSandboxId: podID, SandboxConfig: podConfig, @@ -74,7 +75,7 @@ func Test_RunContainer_InheritUser_JobContainer_WCOW(t *testing.T) { defer removePodSandbox(t, client, podctx, podID) defer stopPodSandbox(t, client, podctx, podID) - containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver) + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() @@ -113,7 +114,7 @@ func Test_RunContainer_Hostname_JobContainer_WCOW(t *testing.T) { defer removePodSandbox(t, client, podctx, podID) defer stopPodSandbox(t, client, podctx, podID) - containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver) + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() @@ -146,7 +147,7 @@ func 
Test_RunContainer_HNS_JobContainer_WCOW(t *testing.T) { defer removePodSandbox(t, client, podctx, podID) defer stopPodSandbox(t, client, podctx, podID) - containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS) + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS, nil) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() @@ -193,7 +194,7 @@ func Test_RunContainer_VHD_JobContainer_WCOW(t *testing.T) { defer removePodSandbox(t, client, podctx, podID) defer stopPodSandbox(t, client, podctx, podID) - containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD) + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD, nil) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() @@ -244,7 +245,7 @@ func Test_RunContainer_ETW_JobContainer_WCOW(t *testing.T) { defer removePodSandbox(t, client, podctx, podID) defer stopPodSandbox(t, client, podctx, podID) - containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW) + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW, nil) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() @@ -299,7 +300,7 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) { defer removePodSandbox(t, client, podctx, podID) defer stopPodSandbox(t, client, podctx, podID) - containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver) + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() @@ -326,3 +327,120 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) { 
t.Fatalf("expected volumes to be the same within job process container. got %q but expected %q", hostStdout, containerStdout) } } + +func Test_RunContainer_JobContainer_VolumeMount(t *testing.T) { + client := newTestRuntimeClient(t) + + dir, err := ioutil.TempDir("", "example") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + tmpfn := filepath.Join(dir, "tmpfile") + if err := ioutil.WriteFile(tmpfn, []byte("test"), 0777); err != nil { + t.Fatal(err) + } + + mountDriveLetter := []*runtime.Mount{ + { + HostPath: dir, + ContainerPath: "C:\\path\\in\\container", + }, + } + + mountNoDriveLetter := []*runtime.Mount{ + { + HostPath: dir, + ContainerPath: "/path/in/container", + }, + } + + mountSingleFile := []*runtime.Mount{ + { + HostPath: tmpfn, + ContainerPath: "/path/in/container/testfile", + }, + } + + type config struct { + name string + containerName string + requiredFeatures []string + runtimeHandler string + sandboxImage string + containerImage string + cmd []string + exec []string + mounts []*runtime.Mount + } + + tests := []config{ + { + name: "JobContainer_VolumeMount_DriveLetter", + containerName: t.Name() + "-Container-DriveLetter", + requiredFeatures: []string{featureWCOWProcess, featureHostProcess}, + runtimeHandler: wcowProcessRuntimeHandler, + sandboxImage: imageWindowsNanoserver, + containerImage: imageWindowsNanoserver, + cmd: []string{"ping", "-t", "127.0.0.1"}, + mounts: mountDriveLetter, + exec: []string{"cmd", "/c", "dir", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"}, + }, + { + name: "JobContainer_VolumeMount_NoDriveLetter", + containerName: t.Name() + "-Container-NoDriveLetter", + requiredFeatures: []string{featureWCOWProcess, featureHostProcess}, + runtimeHandler: wcowProcessRuntimeHandler, + sandboxImage: imageWindowsNanoserver, + containerImage: imageWindowsNanoserver, + cmd: []string{"ping", "-t", "127.0.0.1"}, + mounts: mountNoDriveLetter, + exec: []string{"cmd", "/c", "dir", 
"%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"}, + }, + { + name: "JobContainer_VolumeMount_SingleFile", + containerName: t.Name() + "-Container-SingleFile", + requiredFeatures: []string{featureWCOWProcess, featureHostProcess}, + runtimeHandler: wcowProcessRuntimeHandler, + sandboxImage: imageWindowsNanoserver, + containerImage: imageWindowsNanoserver, + cmd: []string{"ping", "-t", "127.0.0.1"}, + mounts: mountSingleFile, + exec: []string{"cmd", "/c", "type", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\testfile"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + requireFeatures(t, test.requiredFeatures...) + + requiredImages := []string{test.sandboxImage, test.containerImage} + pullRequiredImages(t, requiredImages) + + podctx := context.Background() + sandboxRequest := getJobContainerPodRequestWCOW(t) + + podID := runPodSandbox(t, client, podctx, sandboxRequest) + defer removePodSandbox(t, client, podctx, podID) + defer stopPodSandbox(t, client, podctx, podID) + + containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, test.containerImage, test.mounts) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + containerID := createContainer(t, client, ctx, containerRequest) + defer removeContainer(t, client, ctx, containerID) + startContainer(t, client, ctx, containerID) + defer stopContainer(t, client, ctx, containerID) + + r := execSync(t, client, ctx, &runtime.ExecSyncRequest{ + ContainerId: containerID, + Cmd: test.exec, + }) + if r.ExitCode != 0 { + t.Fatalf("failed with exit code %d checking for job container mount: %s", r.ExitCode, string(r.Stderr)) + } + }) + } +}