Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion internal/jobcontainers/jobcontainer.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,10 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ *
layers := layers.NewImageLayers(nil, "", s.Windows.LayerFolders, sandboxPath, false)
r.SetLayers(layers)

if err := setupMounts(s, container.sandboxMount); err != nil {
return nil, nil, err
}

volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil {
return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, s.Root.Path)
Expand Down Expand Up @@ -524,7 +528,7 @@ func systemProcessInformation() ([]*winapi.SYSTEM_PROCESS_INFORMATION, error) {
var (
systemProcInfo *winapi.SYSTEM_PROCESS_INFORMATION
procInfos []*winapi.SYSTEM_PROCESS_INFORMATION
// This happens to be the buffer size hcs uses but there's no really no hard need to keep it
// This happens to be the buffer size hcs uses but there's really no hard need to keep it
// the same, it's just a sane default.
size = uint32(1024 * 512)
bounds uintptr
Expand Down
56 changes: 56 additions & 0 deletions internal/jobcontainers/mounts.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package jobcontainers

import (
"fmt"
"os"
"path/filepath"
"strings"

specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)

// isnamedPipePath returns true if the given path refers to a named pipe
// (i.e. it starts with the `\\.\pipe\` prefix).
func isnamedPipePath(p string) bool {
	return strings.HasPrefix(p, `\\.\pipe\`)
}

// Strip the drive letter (if there is one) so we don't end up with "%CONTAINER_SANDBOX_MOUNT_POINT%"\C:\path\to\mount
// stripDriveLetter removes a leading drive letter (e.g. "C:") from name so the
// result can be joined under the sandbox mount point without producing paths
// like "%CONTAINER_SANDBOX_MOUNT_POINT%"\C:\path\to\mount. A bare drive letter
// ("C:") maps to ".", and names without a drive letter are returned unchanged.
func stripDriveLetter(name string) string {
	if len(name) < 2 || name[1] != ':' {
		return name
	}
	if len(name) == 2 {
		// Just "C:" — nothing after the drive letter.
		return "."
	}
	return name[2:]
}

// setupMounts adds the custom mounts requested in the OCI runtime spec. Mounts are a bit funny as you already have
// access to everything on the host, so just symlink in whatever was requested to the path where the container volume
// is mounted. At least then the mount can be accessed from a path relative to the default working directory/where the volume
// is.
func setupMounts(spec *specs.Spec, sandboxVolumePath string) error {
for _, mount := range spec.Mounts {
if mount.Destination == "" || mount.Source == "" {
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
}

if isnamedPipePath(mount.Source) {
return errors.New("named pipe mounts not supported for job containers - interact with the pipe directly")
}

fullCtrPath := filepath.Join(sandboxVolumePath, stripDriveLetter(mount.Destination))
Comment thread
katiewasnothere marked this conversation as resolved.
// Make sure all of the dirs leading up to the full path exist.
strippedCtrPath := filepath.Dir(fullCtrPath)
if err := os.MkdirAll(strippedCtrPath, 0777); err != nil {
return errors.Wrap(err, "failed to make directory for job container mount")
}

if err := os.Symlink(mount.Source, fullCtrPath); err != nil {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there an expectation based on standard container mount behavior that a mount over an existing path will mask the existing path? os.Symlink will fail if the target exists.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Correct yea. At the bindflt level I'd assume there's nothing stopping a shadowing of whatever was there, but I'm not sure if hcs has any logic to disallow this. Let me give it a whirl, that's a good point.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@msscotb Yea the behavior on Windows (and Linux but I knew this part) is to mask whatever is there. I don't know how we could mimic this without bindflt :/

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@msscotb Was this a blocking comment or just a "curious what happens in a normal container scenario"?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@msscotb ping on this

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't see a better option since binding as we do for standard containers is not possible with the process isolated design current.

return errors.Wrap(err, "failed to setup mount for job container")
}
}

return nil
}
21 changes: 21 additions & 0 deletions internal/jobcontainers/mounts_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
package jobcontainers

import (
"testing"

specs "github.com/opencontainers/runtime-spec/specs-go"
)

// TestNamePipeDeny verifies that setupMounts rejects a mount whose source is a
// named pipe path, since named pipe mounts are not supported for job containers.
func TestNamePipeDeny(t *testing.T) {
	spec := &specs.Spec{
		Mounts: []specs.Mount{
			{
				Source:      `\\.\pipe\dummy\path`,
				Destination: "/path/in/container",
			},
		},
	}

	err := setupMounts(spec, "/test")
	if err == nil {
		t.Fatal("expected named pipe mount validation to fail for job container")
	}
}
134 changes: 126 additions & 8 deletions test/cri-containerd/jobcontainer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ func getJobContainerPodRequestWCOW(t *testing.T) *runtime.RunPodSandboxRequest {
}
}

func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string) *runtime.CreateContainerRequest {
func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string, mounts []*runtime.Mount) *runtime.CreateContainerRequest {
return &runtime.CreateContainerRequest{
Config: &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Expand All @@ -49,11 +49,12 @@ func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.P
"-t",
"127.0.0.1",
},

Mounts: mounts,
Annotations: map[string]string{
"microsoft.com/hostprocess-container": "true",
"microsoft.com/hostprocess-inherit-user": "true",
},
Windows: &runtime.WindowsContainerConfig{},
},
PodSandboxId: podID,
SandboxConfig: podConfig,
Expand All @@ -74,7 +75,7 @@ func Test_RunContainer_InheritUser_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -113,7 +114,7 @@ func Test_RunContainer_Hostname_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -146,7 +147,7 @@ func Test_RunContainer_HNS_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -193,7 +194,7 @@ func Test_RunContainer_VHD_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -244,7 +245,7 @@ func Test_RunContainer_ETW_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -299,7 +300,7 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand All @@ -326,3 +327,120 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
t.Fatalf("expected volumes to be the same within job process container. got %q but expected %q", hostStdout, containerStdout)
}
}

func Test_RunContainer_JobContainer_VolumeMount(t *testing.T) {
client := newTestRuntimeClient(t)

dir, err := ioutil.TempDir("", "example")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)

tmpfn := filepath.Join(dir, "tmpfile")
if err := ioutil.WriteFile(tmpfn, []byte("test"), 0777); err != nil {
t.Fatal(err)
}

mountDriveLetter := []*runtime.Mount{
{
HostPath: dir,
ContainerPath: "C:\\path\\in\\container",
},
}

mountNoDriveLetter := []*runtime.Mount{
{
HostPath: dir,
ContainerPath: "/path/in/container",
Comment thread
dcantah marked this conversation as resolved.
},
}

mountSingleFile := []*runtime.Mount{
{
HostPath: tmpfn,
ContainerPath: "/path/in/container/testfile",
},
}

type config struct {
name string
containerName string
requiredFeatures []string
runtimeHandler string
sandboxImage string
containerImage string
cmd []string
exec []string
mounts []*runtime.Mount
}

tests := []config{
{
name: "JobContainer_VolumeMount_DriveLetter",
containerName: t.Name() + "-Container-DriveLetter",
requiredFeatures: []string{featureWCOWProcess, featureHostProcess},
runtimeHandler: wcowProcessRuntimeHandler,
sandboxImage: imageWindowsNanoserver,
containerImage: imageWindowsNanoserver,
cmd: []string{"ping", "-t", "127.0.0.1"},
mounts: mountDriveLetter,
exec: []string{"cmd", "/c", "dir", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"},
},
{
name: "JobContainer_VolumeMount_NoDriveLetter",
containerName: t.Name() + "-Container-NoDriveLetter",
requiredFeatures: []string{featureWCOWProcess, featureHostProcess},
runtimeHandler: wcowProcessRuntimeHandler,
sandboxImage: imageWindowsNanoserver,
containerImage: imageWindowsNanoserver,
cmd: []string{"ping", "-t", "127.0.0.1"},
mounts: mountNoDriveLetter,
exec: []string{"cmd", "/c", "dir", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"},
},
{
name: "JobContainer_VolumeMount_SingleFile",
containerName: t.Name() + "-Container-SingleFile",
requiredFeatures: []string{featureWCOWProcess, featureHostProcess},
runtimeHandler: wcowProcessRuntimeHandler,
sandboxImage: imageWindowsNanoserver,
containerImage: imageWindowsNanoserver,
cmd: []string{"ping", "-t", "127.0.0.1"},
mounts: mountSingleFile,
exec: []string{"cmd", "/c", "type", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\testfile"},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
requireFeatures(t, test.requiredFeatures...)

requiredImages := []string{test.sandboxImage, test.containerImage}
pullRequiredImages(t, requiredImages)

podctx := context.Background()
sandboxRequest := getJobContainerPodRequestWCOW(t)

podID := runPodSandbox(t, client, podctx, sandboxRequest)
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, test.mounts)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

containerID := createContainer(t, client, ctx, containerRequest)
defer removeContainer(t, client, ctx, containerID)
startContainer(t, client, ctx, containerID)
defer stopContainer(t, client, ctx, containerID)

r := execSync(t, client, ctx, &runtime.ExecSyncRequest{
ContainerId: containerID,
Cmd: test.exec,
})
if r.ExitCode != 0 {
t.Fatalf("failed with exit code %d checking for job container mount: %s", r.ExitCode, string(r.Stderr))
}
})
}
}