diff --git a/internal/guest/runtime/hcsv2/container.go b/internal/guest/runtime/hcsv2/container.go index c2d234fd3a..9dcd080cb3 100644 --- a/internal/guest/runtime/hcsv2/container.go +++ b/internal/guest/runtime/hcsv2/container.go @@ -69,6 +69,22 @@ func (c *Container) ExecProcess(ctx context.Context, process *oci.Process, conSe // Add in the core rlimit specified on the container in case there was one set. This makes it so that execed processes can also generate // core dumps. process.Rlimits = c.spec.Process.Rlimits + + // If the client provided a user for the container to run as, we want to have the exec run as this user as well + // unless the exec's spec was explicitly set to a different user. If the Username field is filled in on the container's + // spec, at this point that means the work to find a uid:gid pairing for this username has already been done, so simply + // assign the uid:gid from the container. + if process.User.Username != "" { + // The exec provided a user string of its own. Grab the uid:gid pairing for the string (if one exists). + if err := setUserStr(&oci.Spec{Root: c.spec.Root, Process: process}, process.User.Username); err != nil { + return -1, err + } + // Runc doesn't care about this, and just to be safe clear it. 
+ process.User.Username = "" + } else if c.spec.Process.User.Username != "" { + process.User = c.spec.Process.User + } + p, err := c.container.ExecProcess(process, stdioSet) if err != nil { stdioSet.Close() diff --git a/internal/guest/runtime/hcsv2/sandbox_container.go b/internal/guest/runtime/hcsv2/sandbox_container.go index 516ef8a0bc..a4bd594505 100644 --- a/internal/guest/runtime/hcsv2/sandbox_container.go +++ b/internal/guest/runtime/hcsv2/sandbox_container.go @@ -102,8 +102,12 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) ( return errors.Wrap(err, "failed to write sandbox resolv.conf") } - if userstr, ok := spec.Annotations["io.microsoft.lcow.userstr"]; ok { - if err := setUserStr(spec, userstr); err != nil { + // User.Username is generally only used on Windows, but as there's no (easy/fast at least) way to grab + // a uid:gid pairing for a username string on the host, we need to defer this work until we're here in the + // guest. The username field is used as a temporary holding place until we can perform this work here when + // we actually have the rootfs to inspect. 
+ if spec.Process.User.Username != "" { + if err := setUserStr(spec, spec.Process.User.Username); err != nil { return err } } diff --git a/internal/guest/runtime/hcsv2/spec.go b/internal/guest/runtime/hcsv2/spec.go index 02168f6874..99373ba75b 100644 --- a/internal/guest/runtime/hcsv2/spec.go +++ b/internal/guest/runtime/hcsv2/spec.go @@ -5,12 +5,12 @@ package hcsv2 import ( "context" "fmt" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/opencontainers/runc/libcontainer/devices" "path/filepath" "strconv" "strings" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runc/libcontainer/user" oci "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -251,11 +251,5 @@ func applyAnnotationsToSpec(ctx context.Context, spec *oci.Spec) error { } } - // Check if we need to set non-default user - if userstr, ok := spec.Annotations["io.microsoft.lcow.userstr"]; ok { - if err := setUserStr(spec, userstr); err != nil { - return err - } - } return nil } diff --git a/internal/guest/runtime/hcsv2/workload_container.go b/internal/guest/runtime/hcsv2/workload_container.go index 2148384316..bb830044de 100644 --- a/internal/guest/runtime/hcsv2/workload_container.go +++ b/internal/guest/runtime/hcsv2/workload_container.go @@ -167,6 +167,16 @@ func setupWorkloadContainerSpec(ctx context.Context, sbid, id string, spec *oci. } } + // User.Username is generally only used on Windows, but as there's no (easy/fast at least) way to grab + // a uid:gid pairing for a username string on the host, we need to defer this work until we're here in the + // guest. The username field is used as a temporary holding place until we can perform this work here when + // we actually have the rootfs to inspect. 
+ if spec.Process.User.Username != "" { + if err := setUserStr(spec, spec.Process.User.Username); err != nil { + return err + } + } + // Force the parent cgroup into our /containers root spec.Linux.CgroupsPath = "/containers/" + id diff --git a/test/cri-containerd/container_test.go b/test/cri-containerd/container_test.go index 32c8617534..74eef12918 100644 --- a/test/cri-containerd/container_test.go +++ b/test/cri-containerd/container_test.go @@ -893,3 +893,115 @@ func Test_CreateContainer_HugePageMount_LCOW(t *testing.T) { t.Fatalf("output is supposed to contain pagesize=2M, output: %s", output) } } + +func Test_RunContainer_ExecUser_LCOW(t *testing.T) { + requireFeatures(t, featureLCOW) + + pullRequiredLcowImages(t, []string{imageLcowK8sPause, imageLcowCustomUser}) + + client := newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sandboxRequest := getRunPodSandboxRequest(t, lcowRuntimeHandler, nil) + + podID := runPodSandbox(t, client, ctx, sandboxRequest) + defer removePodSandbox(t, client, ctx, podID) + defer stopPodSandbox(t, client, ctx, podID) + + cmd := []string{"sh", "-c", "while true; do sleep 1; done"} + request := &runtime.CreateContainerRequest{ + PodSandboxId: podID, + Config: &runtime.ContainerConfig{ + Metadata: &runtime.ContainerMetadata{ + Name: t.Name() + "-Container", + }, + Image: &runtime.ImageSpec{ + Image: imageLcowCustomUser, + }, + Command: cmd, + }, + SandboxConfig: sandboxRequest.Config, + } + + containerID := createContainer(t, client, ctx, request) + defer removeContainer(t, client, ctx, containerID) + startContainer(t, client, ctx, containerID) + defer stopContainer(t, client, ctx, containerID) + + // The `imageLcowCustomUser` image has a user created in the image named test that is set to run the init process as. This tests that + // any execed processes will honor the user set for the container also. 
+ cmd = []string{"whoami"} + containerExecReq := &runtime.ExecSyncRequest{ + ContainerId: containerID, + Cmd: cmd, + Timeout: 20, + } + r := execSync(t, client, ctx, containerExecReq) + if r.ExitCode != 0 { + t.Fatalf("failed with exit code %d: %s", r.ExitCode, string(r.Stderr)) + } + + if !strings.Contains(string(r.Stdout), "test") { + t.Fatalf("expected user for exec to be 'test', got %q", string(r.Stdout)) + } +} + +func Test_RunContainer_ExecUser_Root_LCOW(t *testing.T) { + requireFeatures(t, featureLCOW) + + pullRequiredLcowImages(t, []string{imageLcowK8sPause, imageLcowCustomUser}) + + client := newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sandboxRequest := getRunPodSandboxRequest(t, lcowRuntimeHandler, nil) + + podID := runPodSandbox(t, client, ctx, sandboxRequest) + defer removePodSandbox(t, client, ctx, podID) + defer stopPodSandbox(t, client, ctx, podID) + + // Override what user to run the container as and see if the exec also runs as root now. + cmd := []string{"sh", "-c", "while true; do sleep 1; done"} + request := &runtime.CreateContainerRequest{ + PodSandboxId: podID, + Config: &runtime.ContainerConfig{ + Metadata: &runtime.ContainerMetadata{ + Name: t.Name() + "-Container", + }, + Image: &runtime.ImageSpec{ + Image: imageLcowCustomUser, + }, + Command: cmd, + Linux: &runtime.LinuxContainerConfig{ + SecurityContext: &runtime.LinuxContainerSecurityContext{ + RunAsUsername: "root", + }, + }, + }, + SandboxConfig: sandboxRequest.Config, + } + + containerID := createContainer(t, client, ctx, request) + defer removeContainer(t, client, ctx, containerID) + startContainer(t, client, ctx, containerID) + defer stopContainer(t, client, ctx, containerID) + + // The `imageLcowCustomUser` image has a user named test set to run the init process, but the container config above overrides this to root. This tests that + // any execed processes will honor the user override set for the container also. 
+ cmd = []string{"whoami"} + containerExecReq := &runtime.ExecSyncRequest{ + ContainerId: containerID, + Cmd: cmd, + Timeout: 20, + } + r := execSync(t, client, ctx, containerExecReq) + if r.ExitCode != 0 { + t.Fatalf("failed with exit code %d: %s", r.ExitCode, string(r.Stderr)) + } + + if !strings.Contains(string(r.Stdout), "root") { + t.Fatalf("expected user for exec to be 'root', got %q", string(r.Stdout)) + } +} diff --git a/test/cri-containerd/main.go b/test/cri-containerd/main.go index 015d6a3337..f673304874 100644 --- a/test/cri-containerd/main.go +++ b/test/cri-containerd/main.go @@ -51,6 +51,7 @@ const ( imageLcowAlpineCoreDump = "cplatpublic.azurecr.io/stackoverflow-alpine:latest" imageWindowsProcessDump = "cplatpublic.azurecr.io/crashdump:latest" imageLcowCosmos = "cosmosarno/spark-master:2.4.1_2019-04-18_8e864ce" + imageLcowCustomUser = "cplatpublic.azurecr.io/linux_custom_user:latest" imageJobContainerHNS = "cplatpublic.azurecr.io/jobcontainer_hns:latest" imageJobContainerETW = "cplatpublic.azurecr.io/jobcontainer_etw:latest" imageJobContainerVHD = "cplatpublic.azurecr.io/jobcontainer_vhd:latest" @@ -164,7 +165,7 @@ func getWindowsNanoserverImage(build uint16) string { case osversion.V20H2: return "mcr.microsoft.com/windows/nanoserver:2009" default: - return "mcr.microsoft.com/windows/nanoserver:2009" + panic("unsupported build") } } @@ -181,7 +182,7 @@ func getWindowsServerCoreImage(build uint16) string { case osversion.V20H2: return "mcr.microsoft.com/windows/servercore:2009" default: - return "mcr.microsoft.com/windows/nanoserver:2009" + panic("unsupported build") } } diff --git a/test/cri-containerd/test-images/lcow_custom_user/Dockerfile b/test/cri-containerd/test-images/lcow_custom_user/Dockerfile new file mode 100644 index 0000000000..f1d541c0bb --- /dev/null +++ b/test/cri-containerd/test-images/lcow_custom_user/Dockerfile @@ -0,0 +1,4 @@ +FROM ubuntu:latest + +RUN useradd -ms /bin/bash test +USER test \ No newline at end of file