Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
249 changes: 245 additions & 4 deletions test/cri-containerd/jobcontainer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,17 @@ package cri_containerd

import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"

"github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/hcn"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)

Expand All @@ -26,14 +33,14 @@ func getJobContainerPodRequestWCOW(t *testing.T) *runtime.RunPodSandboxRequest {
}
}

func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig) *runtime.CreateContainerRequest {
func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string) *runtime.CreateContainerRequest {
return &runtime.CreateContainerRequest{
Config: &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: t.Name() + "-Container",
},
Image: &runtime.ImageSpec{
Image: imageWindowsNanoserver,
Image: image,
},
Command: []string{
"cmd",
Expand All @@ -44,7 +51,7 @@ func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.P
},

Annotations: map[string]string{
"microsoft.com/hostprocess": "true",
"microsoft.com/hostprocess-container": "true",
"microsoft.com/hostprocess-inherit-user": "true",
},
},
Expand All @@ -67,7 +74,7 @@ func Test_RunContainer_InheritUser_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand All @@ -85,3 +92,237 @@ func Test_RunContainer_InheritUser_JobContainer_WCOW(t *testing.T) {
t.Fatalf("expected user: '%s', got '%s'", username, stdout)
}
}

// Test_RunContainer_Hostname_JobContainer_WCOW validates that the hostname
// seen inside a host process (job) container matches the host's hostname, as
// the container is just a process running directly on the host.
func Test_RunContainer_Hostname_JobContainer_WCOW(t *testing.T) {
	requireFeatures(t, featureWCOWProcess, featureHostProcess)

	pullRequiredImages(t, []string{imageWindowsNanoserver})
	client := newTestRuntimeClient(t)

	// Grab the host's hostname up front; it is the expected value for the
	// in-container comparison below.
	hostname, err := exec.Command("hostname").Output()
	if err != nil {
		t.Fatalf("failed to get hostname: %s", err)
	}

	podctx := context.Background()
	sandboxRequest := getJobContainerPodRequestWCOW(t)

	podID := runPodSandbox(t, client, podctx, sandboxRequest)
	defer removePodSandbox(t, client, podctx, podID)
	defer stopPodSandbox(t, client, podctx, podID)

	containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	containerID := createContainer(t, client, ctx, containerRequest)
	defer removeContainer(t, client, ctx, containerID)
	startContainer(t, client, ctx, containerID)
	defer stopContainer(t, client, ctx, containerID)

	execResponse := execSync(t, client, ctx, &runtime.ExecSyncRequest{
		ContainerId: containerID,
		Cmd:         []string{"hostname"},
	})
	containerStdout := strings.Trim(string(execResponse.Stdout), " \r\n")
	hostStdout := strings.Trim(string(hostname), " \r\n")
	if hostStdout != containerStdout {
		// Fix: the container's value is what we "got"; the host's value is
		// what we "expected" (the original message had the arguments swapped).
		t.Fatalf("expected hostname to be the same within job container. got %s but expected %s", containerStdout, hostStdout)
	}
}

// Test_RunContainer_HNS_JobContainer_WCOW runs a host process container whose
// entrypoint creates an HNS network on the host named after the value we pass
// as its first argument. The test then verifies from the host side that the
// network exists, proving the container can drive host-level HNS operations.
func Test_RunContainer_HNS_JobContainer_WCOW(t *testing.T) {
	requireFeatures(t, featureWCOWProcess, featureHostProcess)

	pullRequiredImages(t, []string{imageJobContainerHNS})
	client := newTestRuntimeClient(t)

	podctx := context.Background()
	sandboxRequest := getJobContainerPodRequestWCOW(t)

	podID := runPodSandbox(t, client, podctx, sandboxRequest)
	defer removePodSandbox(t, client, podctx, podID)
	defer stopPodSandbox(t, client, podctx, podID)

	containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS)
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	// Network name includes the pod ID so concurrent runs don't collide.
	networkName := fmt.Sprintf("JobContainer-Network-%s", podID)
	containerRequest.Config.Command = []string{
		"go/src/hns/hns.exe",
		networkName,
	}

	containerID := createContainer(t, client, ctx, containerRequest)
	defer removeContainer(t, client, ctx, containerID)
	startContainer(t, client, ctx, containerID)
	defer stopContainer(t, client, ctx, containerID)

	// Wait a couple of seconds to make sure the network has come up.
	time.Sleep(time.Second * 5)
	// After the init process ends, there should be an HNS network named after os.Args[1] that we passed
	// in. Check if it exists to:
	// 1. See if it worked and if it's not present we need to fail.
	// 2. If it did work we need to delete it.
	network, err := hcn.GetNetworkByName(networkName)
	if err != nil {
		if _, ok := err.(hcn.NetworkNotFoundError); ok {
			t.Fatalf("no network/switch with name %q found: %s", networkName, err)
		}
		t.Fatalf("failed to get network/switch with name %q: %s", networkName, err)
	}

	// Clean up the network the container created so reruns start clean.
	if err := network.Delete(); err != nil {
		t.Fatalf("failed to delete HNS network: %s", err)
	}
}

// Test_RunContainer_VHD_JobContainer_WCOW runs a host process container whose
// entrypoint creates an NTFS formatted VHD at a host path we choose, then
// verifies from the host side that the VHD exists and can be attached and
// detached.
func Test_RunContainer_VHD_JobContainer_WCOW(t *testing.T) {
	requireFeatures(t, featureWCOWProcess, featureHostProcess)

	pullRequiredImages(t, []string{imageJobContainerVHD})
	client := newTestRuntimeClient(t)

	podctx := context.Background()
	sandboxRequest := getJobContainerPodRequestWCOW(t)

	podID := runPodSandbox(t, client, podctx, sandboxRequest)
	defer removePodSandbox(t, client, podctx, podID)
	defer stopPodSandbox(t, client, podctx, podID)

	containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD)
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	// Scratch directory on the host for the container to write the VHD into.
	tempDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tempDir)

	targetVHD := filepath.Join(tempDir, "test.vhdx")
	containerRequest.Config.Command = []string{"go/src/vhd/vhd.exe", targetVHD}

	containerID := createContainer(t, client, ctx, containerRequest)
	defer removeContainer(t, client, ctx, containerID)
	startContainer(t, client, ctx, containerID)
	defer stopContainer(t, client, ctx, containerID)

	// Give the container a moment to start up and create the VHD.
	time.Sleep(time.Second * 3)

	// The vhd.exe binary inside the image writes an NTFS formatted VHD to the
	// path passed as its first argument. Its presence (and being attachable)
	// is the success case.
	if _, err := os.Stat(targetVHD); os.IsNotExist(err) {
		t.Fatalf("vhd not present at %q: %s", targetVHD, err)
	}

	if err := vhd.AttachVhd(targetVHD); err != nil {
		t.Fatalf("failed to attach vhd at %q: %s", targetVHD, err)
	}

	if err := vhd.DetachVhd(targetVHD); err != nil {
		t.Fatalf("failed to detach vhd at %q: %s", targetVHD, err)
	}
}

// Test_RunContainer_ETW_JobContainer_WCOW launches an image that carries a
// wprp file used to take an ETW trace, then has tracerpt turn the resulting
// etl file into a report/dump file. This verifies the common use case of
// grabbing host traces/diagnostics from a host process container.
func Test_RunContainer_ETW_JobContainer_WCOW(t *testing.T) {
	requireFeatures(t, featureWCOWProcess, featureHostProcess)

	pullRequiredImages(t, []string{imageJobContainerETW})
	client := newTestRuntimeClient(t)

	podctx := context.Background()
	sandboxRequest := getJobContainerPodRequestWCOW(t)

	podID := runPodSandbox(t, client, podctx, sandboxRequest)
	defer removePodSandbox(t, client, podctx, podID)
	defer stopPodSandbox(t, client, podctx, podID)

	containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW)
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	// Host-side scratch directory for the trace artifacts.
	traceDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(traceDir)

	// The only ETW provider defined in the image is for HNS, so the binary
	// performs a simple HNS operation (creating a network with this name)
	// purely to generate some trace output.
	networkName := fmt.Sprintf("JobContainer-Network-%s", podID)
	etlFile := filepath.Join(traceDir, "output.etl")
	dumpFile := filepath.Join(traceDir, "output.xml")
	containerRequest.Config.Command = []string{
		"go/src/etw/etw.exe",
		networkName,
		etlFile,
		dumpFile,
	}

	containerID := createContainer(t, client, ctx, containerRequest)
	defer removeContainer(t, client, ctx, containerID)
	startContainer(t, client, ctx, containerID)
	defer stopContainer(t, client, ctx, containerID)

	// Allow time for the container to start up and produce the etl/dump files.
	time.Sleep(time.Second * 10)
	if _, err := os.Stat(etlFile); os.IsNotExist(err) {
		t.Fatalf("failed to find etl file %q: %s", etlFile, err)
	}

	if _, err := os.Stat(dumpFile); os.IsNotExist(err) {
		t.Fatalf("failed to find dump file %q: %s", dumpFile, err)
	}
}

// Test_RunContainer_HostVolumes_JobContainer_WCOW validates that the volumes
// visible inside a host process (job) container (via mountvol) are the same
// as those visible on the host.
func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
	requireFeatures(t, featureWCOWProcess, featureHostProcess)

	pullRequiredImages(t, []string{imageWindowsNanoserver})
	client := newTestRuntimeClient(t)

	podctx := context.Background()
	sandboxRequest := getJobContainerPodRequestWCOW(t)

	podID := runPodSandbox(t, client, podctx, sandboxRequest)
	defer removePodSandbox(t, client, podctx, podID)
	defer stopPodSandbox(t, client, podctx, podID)

	containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	containerID := createContainer(t, client, ctx, containerRequest)
	defer removeContainer(t, client, ctx, containerID)
	startContainer(t, client, ctx, containerID)
	defer stopContainer(t, client, ctx, containerID)

	execResponse := execSync(t, client, ctx, &runtime.ExecSyncRequest{
		ContainerId: containerID,
		Cmd:         []string{"mountvol"},
	})
	containerStdout := strings.Trim(string(execResponse.Stdout), " \r\n")

	// This test validates we see the same volumes on the host as in the container. We have to do this after the
	// container has been launched as the containers scratch space is a new volume
	volumes, err := exec.Command("mountvol").Output()
	if err != nil {
		t.Fatalf("failed to get volumes: %s", err)
	}
	hostStdout := strings.Trim(string(volumes), " \r\n")

	if hostStdout != containerStdout {
		// Fix: the container's output is what we "got"; the host's output is
		// what we "expected" (the original message had the arguments swapped).
		t.Fatalf("expected volumes to be the same within job process container. got %q but expected %q", containerStdout, hostStdout)
	}
}
15 changes: 9 additions & 6 deletions test/cri-containerd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,15 @@ const (
testDriversPath = "C:\\ContainerPlat\\testdrivers"
testGPUBootFiles = "C:\\ContainerPlat\\LinuxBootFiles\\nvidiagpu"

lcowRuntimeHandler = "runhcs-lcow"
imageLcowK8sPause = "k8s.gcr.io/pause:3.1"
imageLcowAlpine = "docker.io/library/alpine:latest"
imageLcowCosmos = "cosmosarno/spark-master:2.4.1_2019-04-18_8e864ce"
alpineAspNet = "mcr.microsoft.com/dotnet/core/aspnet:3.1-alpine3.11"
alpineAspnetUpgrade = "mcr.microsoft.com/dotnet/core/aspnet:3.1.2-alpine3.11"
lcowRuntimeHandler = "runhcs-lcow"
imageLcowK8sPause = "k8s.gcr.io/pause:3.1"
imageLcowAlpine = "docker.io/library/alpine:latest"
imageLcowCosmos = "cosmosarno/spark-master:2.4.1_2019-04-18_8e864ce"
imageJobContainerHNS = "cplatpublic.azurecr.io/jobcontainer_hns:latest"
imageJobContainerETW = "cplatpublic.azurecr.io/jobcontainer_etw:latest"
imageJobContainerVHD = "cplatpublic.azurecr.io/jobcontainer_vhd:latest"
alpineAspNet = "mcr.microsoft.com/dotnet/core/aspnet:3.1-alpine3.11"
alpineAspnetUpgrade = "mcr.microsoft.com/dotnet/core/aspnet:3.1.2-alpine3.11"
// Default account name for use with GMSA related tests. This will not be
// present/you will not have access to the account on your machine unless
// your environment is configured properly.
Expand Down
11 changes: 11 additions & 0 deletions test/cri-containerd/test-images/jobcontainer_createvhd/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Irrelevant what image version we use for job containers as there's no container <-> host OS version restraint.
FROM golang:1.15.10-nanoserver-1809

# Get administrator privileges
USER containeradministrator

# Build the test binary; the CRI tests invoke it as go/src/vhd/vhd.exe.
WORKDIR /go/src/vhd
COPY main.go .

RUN go get -d -v ./...
RUN go build -mod=mod
36 changes: 36 additions & 0 deletions test/cri-containerd/test-images/jobcontainer_createvhd/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
package main

import (
	"context"
	"log"
	"os"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim/computestorage"
	"golang.org/x/sys/windows"
)

// Simple binary to create a vhd with a single NTFS partition.
// Usage: vhd.exe <path-to-vhdx>
func main() {
	if len(os.Args) < 2 {
		log.Fatal("must provide VHDX name")
	}

	vhdPath := os.Args[1]
	if err := vhd.CreateVhdx(vhdPath, 1, 1); err != nil {
		log.Fatalf("failed to create VHDX: %s", err)
	}

	vhdHandle, err := vhd.OpenVirtualDisk(vhdPath, vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
	if err != nil {
		log.Fatalf("failed to open VHDX: %s", err)
	}
	defer syscall.CloseHandle(vhdHandle)

	// Format the disk with an NTFS writable-layer partition.
	if err := computestorage.FormatWritableLayerVhd(context.Background(), windows.Handle(vhdHandle)); err != nil {
		log.Fatalf("failed to format VHDX: %s", err)
	}
	// Fix: no trailing os.Exit(0) — os.Exit skips deferred calls, which would
	// bypass the CloseHandle above. Falling off main exits with status 0 and
	// runs the defer. (Also fixed the "VHXD" typo in the format error message.)
}
11 changes: 11 additions & 0 deletions test/cri-containerd/test-images/jobcontainer_etw/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Irrelevant what image version we use for job containers as there's no container <-> host OS version restraint.
FROM golang:1.15.10-nanoserver-1809

# Get administrator privileges
USER containeradministrator

# Build the test binary; the CRI tests invoke it as go/src/etw/etw.exe.
WORKDIR /go/src/etw
COPY . .

RUN go get -d -v ./...
RUN go build -mod=mod
Loading