From 966a05473cbf9c64ec20af2285ddd41ddb2ca138 Mon Sep 17 00:00:00 2001
From: Maksim An
Date: Tue, 17 Nov 2020 22:20:06 -0800
Subject: [PATCH] Add support for logging binary

Add integration tests

Signed-off-by: Maksim An
---
 .gitignore                                 |   2 +
 appveyor.yml                               |   2 +
 cmd/containerd-shim-runhcs-v1/task_hcs.go  |  41 +++-
 internal/cmd/io.go                         |  22 ++
 internal/cmd/io_binary.go                  | 266 +++++++++++++++++++++
 internal/cmd/io_binary_test.go             |  98 ++++++++
 test/cri-containerd/container.go           |  16 ++
 test/cri-containerd/helpers/log.go         |  63 +++++
 test/cri-containerd/logging_binary_test.go | 178 ++++++++++++++
 test/cri-containerd/runpodsandbox_test.go  |  18 +-
 10 files changed, 678 insertions(+), 28 deletions(-)
 create mode 100644 internal/cmd/io_binary.go
 create mode 100644 internal/cmd/io_binary_test.go
 create mode 100644 test/cri-containerd/helpers/log.go
 create mode 100644 test/cri-containerd/logging_binary_test.go

diff --git a/.gitignore b/.gitignore
index b883f1fdc6..aec9bd4bb0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
 *.exe
+.idea
+.vscode
diff --git a/appveyor.yml b/appveyor.yml
index bd3f18f075..9391879b65 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -29,6 +29,7 @@ build_script:
   - go test -c ./cri-containerd/ -tags functional
   - go test -c ./functional/ -tags functional
   - go test -c ./runhcs/ -tags functional
+  - go build -o sample-logging-driver.exe ./cri-containerd/helpers/log.go

 artifacts:
   - path: 'containerd-shim-runhcs-v1.exe'
@@ -43,3 +44,4 @@ artifacts:
   - path: './test/cri-containerd.test.exe'
   - path: './test/functional.test.exe'
   - path: './test/runhcs.test.exe'
+  - path: './test/sample-logging-driver.exe'
diff --git a/cmd/containerd-shim-runhcs-v1/task_hcs.go b/cmd/containerd-shim-runhcs-v1/task_hcs.go
index aa93019e1f..dc3c61a722 100644
--- a/cmd/containerd-shim-runhcs-v1/task_hcs.go
+++ b/cmd/containerd-shim-runhcs-v1/task_hcs.go
@@ -8,6 +8,16 @@ import (
 	"sync"
 	"time"

+	eventstypes "github.com/containerd/containerd/api/events"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/runtime"
+	"github.com/containerd/containerd/runtime/v2/task"
+	"github.com/containerd/typeurl"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
+
 	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
 	runhcsopts "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
 	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
@@ -23,15 +33,6 @@ import (
 	"github.com/Microsoft/hcsshim/internal/shimdiag"
 	"github.com/Microsoft/hcsshim/internal/uvm"
 	"github.com/Microsoft/hcsshim/osversion"
-	eventstypes "github.com/containerd/containerd/api/events"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/runtime"
-	"github.com/containerd/containerd/runtime/v2/task"
-	"github.com/containerd/typeurl"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"go.opencensus.io/trace"
 )

 func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.CreateTaskRequest, s *specs.Spec) (shimTask, error) {
@@ -124,7 +125,7 @@ func newHcsTask(

 	owner := filepath.Base(os.Args[0])

-	io, err := cmd.NewNpipeIO(ctx, req.Stdin, req.Stdout, req.Stderr, req.Terminal)
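+	// NewUpstreamIO chooses the IO implementation from the stdout URI: named
+	// pipes by default, or the binary logging driver for "binary://" URIs
+	// (see internal/cmd/io.go).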
+	io, err := cmd.NewUpstreamIO(ctx, req.ID, req.Stdout, req.Stderr, req.Stdin, req.Terminal)
 	if err != nil {
 		return nil, err
 	}
@@ -181,7 +182,8 @@ func newHcsTask(
 		req.Bundle,
 		ht.isWCOW,
 		s.Process,
-		io)
+		io,
+	)

 	if parent != nil {
 		// We have a parent UVM. Listen for its exit and forcibly close this
@@ -288,11 +290,24 @@ func (ht *hcsTask) CreateExec(ctx context.Context, req *task.ExecProcessRequest,
 		return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '' in task: '%s' must be running to create additional execs", ht.id)
 	}

-	io, err := cmd.NewNpipeIO(ctx, req.Stdin, req.Stdout, req.Stderr, req.Terminal)
+	io, err := cmd.NewUpstreamIO(ctx, req.ID, req.Stdout, req.Stderr, req.Stdin, req.Terminal)
 	if err != nil {
 		return err
 	}
-	he := newHcsExec(ctx, ht.events, ht.id, ht.host, ht.c, req.ExecID, ht.init.Status().Bundle, ht.isWCOW, spec, io)
+
+	he := newHcsExec(
+		ctx,
+		ht.events,
+		ht.id,
+		ht.host,
+		ht.c,
+		req.ExecID,
+		ht.init.Status().Bundle,
+		ht.isWCOW,
+		spec,
+		io,
+	)
+
 	ht.execs.Store(req.ExecID, he)

 	// Publish the created event
diff --git a/internal/cmd/io.go b/internal/cmd/io.go
index 9c4f08ce92..e2150c22aa 100644
--- a/internal/cmd/io.go
+++ b/internal/cmd/io.go
@@ -3,6 +3,9 @@ package cmd
 import (
 	"context"
 	"io"
+	"net/url"
+
+	"github.com/pkg/errors"
 )

 // UpstreamIO is an interface describing the IO to connect to above the shim.
@@ -36,3 +39,22 @@ type UpstreamIO interface {
 	// return `""`.
 	Terminal() bool
 }
+
+// NewUpstreamIO returns an UpstreamIO instance. Currently only named pipes and the binary
+// logging driver are supported for container IO. When the binary logger is used, `stdout`
+// and `stderr` are assumed to be the same and the value of `stderr` is ignored.
+func NewUpstreamIO(ctx context.Context, id string, stdout string, stderr string, stdin string, terminal bool) (UpstreamIO, error) {
+	u, err := url.Parse(stdout)
+
+	// Create IO with named pipes.
+	if err != nil || u.Scheme == "" {
+		return NewNpipeIO(ctx, stdin, stdout, stderr, terminal)
+	}
+
+	// Create IO for the binary logging driver.
+	if u.Scheme != "binary" {
+		return nil, errors.Errorf("scheme must be 'binary', got: '%s'", u.Scheme)
+	}
+
+	return NewBinaryIO(ctx, id, u)
+}
diff --git a/internal/cmd/io_binary.go b/internal/cmd/io_binary.go
new file mode 100644
index 0000000000..1283a2a1cc
--- /dev/null
+++ b/internal/cmd/io_binary.go
@@ -0,0 +1,266 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"os/exec"
+	"sync"
+	"time"
+
+	"github.com/Microsoft/go-winio"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/pkg/errors"
+
+	"github.com/Microsoft/hcsshim/internal/log"
+)
+
+const (
+	binaryPipeFmt         = `\\.\pipe\binary-%s-%s`
+	binaryCmdWaitTimeout  = 10 * time.Second
+	binaryCmdStartTimeout = 10 * time.Second
+)
+
+// NewBinaryIO runs a custom binary process for a pluggable shim logging driver.
+//
+// The container's IO is redirected to the logging driver via named pipes, which are
+// passed as the "CONTAINER_STDOUT" and "CONTAINER_STDERR" environment variables. The
+// logging driver MUST dial the wait pipe passed via the "CONTAINER_WAIT" environment
+// variable AND CLOSE it to indicate that it's ready to consume the IO. For the logger's
+// convenience, the container ID and namespace are also passed via "CONTAINER_ID" and
+// "CONTAINER_NAMESPACE".
+//
+// The path to the logging driver is provided via the URL's host/path. Additional
+// arguments can be passed to the logger via URL query parameters.
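+//
+// For example (mirroring the cases in io_binary_test.go), the URI
+// "binary:///executable?-key=value" resolves to the executable "/executable"
+// invoked with the arguments ["-key", "value"].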
+func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (_ UpstreamIO, err error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		ns = namespaces.Default
+	}
+
+	var stdoutPipe, stderrPipe, waitPipe io.ReadWriteCloser
+
+	stdoutPipePath := fmt.Sprintf(binaryPipeFmt, id, "stdout")
+	stdoutPipe, err = openNPipe(stdoutPipePath)
+	if err != nil {
+		return nil, err
+	}
+
+	stderrPipePath := fmt.Sprintf(binaryPipeFmt, id, "stderr")
+	stderrPipe, err = openNPipe(stderrPipePath)
+	if err != nil {
+		return nil, err
+	}
+
+	waitPipePath := fmt.Sprintf(binaryPipeFmt, id, "wait")
+	waitPipe, err = openNPipe(waitPipePath)
+	if err != nil {
+		return nil, err
+	}
+	defer waitPipe.Close()
+
+	envs := []string{
+		"CONTAINER_ID=" + id,
+		"CONTAINER_NAMESPACE=" + ns,
+		"CONTAINER_STDOUT=" + stdoutPipePath,
+		"CONTAINER_STDERR=" + stderrPipePath,
+		"CONTAINER_WAIT=" + waitPipePath,
+	}
+	cmd, err := newBinaryCmd(ctx, uri, envs)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	errCh := make(chan error, 1)
+	// Wait for the logging driver to signal via the wait pipe that it's ready to consume IO.
+	go func() {
+		b := make([]byte, 1)
+		if _, err := waitPipe.Read(b); err != nil && err != io.EOF {
+			errCh <- err
+			return
+		}
+		errCh <- nil
+	}()
+
+	select {
+	case err = <-errCh:
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to start binary logger")
+		}
+	case <-time.After(binaryCmdStartTimeout):
+		return nil, errors.New("failed to start binary logger: timeout")
+	}
+
+	return &binaryIO{
+		cmd:    cmd,
+		stdout: stdoutPipePath,
+		sout:   stdoutPipe,
+		stderr: stderrPipePath,
+		serr:   stderrPipe,
+	}, nil
+}
+
+func newBinaryCmd(ctx context.Context, uri *url.URL, envs []string) (*exec.Cmd, error) {
+	if uri.Host == "" && uri.Path == "" {
+		return nil, errors.New("no logging driver path provided")
+	}
+
+	var args []string
+	for k, vs := range uri.Query() {
+		args = append(args, k)
+		if len(vs) > 0 && vs[0] != "" {
+			args = append(args, vs[0])
+		}
+	}
+
+	execPath := uri.Path
+	// An absolute path is required; treat "binary://path/to/binary" and
+	// "binary:///path/to/binary" as the same.
+	if uri.Host != "" {
+		execPath = "/" + uri.Host + uri.Path
+	}
+
+	cmd := exec.CommandContext(ctx, execPath, args...)
+	cmd.Env = append(cmd.Env, envs...)
+
+	return cmd, nil
+}
+
+var _ UpstreamIO = &binaryIO{}
+
+// binaryIO implements the UpstreamIO interface to enable pluggable shim logging.
+type binaryIO struct {
+	cmd *exec.Cmd
+
+	binaryCloser sync.Once
+
+	stdin, stdout, stderr string
+
+	sout, serr io.ReadWriteCloser
+	soutCloser sync.Once
+}
+
+// Close closes the named pipes for container stdout and stderr and waits for the
+// binary process to finish.
+func (b *binaryIO) Close(ctx context.Context) {
+	b.soutCloser.Do(func() {
+		if b.sout != nil {
+			err := b.sout.Close()
+			if err != nil {
+				log.G(ctx).WithError(err).Errorf("error while closing stdout npipe")
+			}
+		}
+		if b.serr != nil {
+			err := b.serr.Close()
+			if err != nil {
+				log.G(ctx).WithError(err).Errorf("error while closing stderr npipe")
+			}
+		}
+	})
+	b.binaryCloser.Do(func() {
+		done := make(chan error)
+		go func() {
+			done <- b.cmd.Wait()
+		}()
+
+		select {
+		case err := <-done:
+			if err != nil {
+				log.G(ctx).WithError(err).Errorf("error while waiting for binary cmd to finish")
+			}
+		case <-time.After(binaryCmdWaitTimeout):
+			log.G(ctx).Errorf("timeout while waiting for binaryIO process to finish. Killing")
+			err := b.cmd.Process.Kill()
+			if err != nil {
+				log.G(ctx).WithError(err).Errorf("error while killing binaryIO process")
+			}
+		}
+	})
+}
+
+func (b *binaryIO) CloseStdin(ctx context.Context) {}
+
+func (b *binaryIO) Stdin() io.Reader {
+	return nil
+}
+
+func (b *binaryIO) StdinPath() string {
+	return ""
+}
+
+func (b *binaryIO) Stdout() io.Writer {
+	return b.sout
+}
+
+func (b *binaryIO) StdoutPath() string {
+	return b.stdout
+}
+
+func (b *binaryIO) Stderr() io.Writer {
+	return b.serr
+}
+
+func (b *binaryIO) StderrPath() string {
+	return b.stderr
+}
+
+func (b *binaryIO) Terminal() bool {
+	return false
+}
+
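+// pipe adapts the server end of a named pipe to io.ReadWriteCloser: Read and
+// Write block until the logging binary has dialed the pipe (or Accept has
+// failed), so IO issued before the connection is established is not dropped.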
+type pipe struct {
+	l      net.Listener
+	con    net.Conn
+	conErr error
+	conWg  sync.WaitGroup
+}
+
+func openNPipe(path string) (io.ReadWriteCloser, error) {
+	l, err := winio.ListenPipe(path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	p := &pipe{l: l}
+	p.conWg.Add(1)
+
+	go func() {
+		defer p.conWg.Done()
+		c, err := l.Accept()
+		if err != nil {
+			p.conErr = err
+			return
+		}
+		p.con = c
+	}()
+	return p, nil
+}
+
+func (p *pipe) Write(b []byte) (int, error) {
+	p.conWg.Wait()
+	if p.conErr != nil {
+		return 0, errors.Wrap(p.conErr, "connection error")
+	}
+	return p.con.Write(b)
+}
+
+func (p *pipe) Read(b []byte) (int, error) {
+	p.conWg.Wait()
+	if p.conErr != nil {
+		return 0, errors.Wrap(p.conErr, "connection error")
+	}
+	return p.con.Read(b)
+}
+
+func (p *pipe) Close() error {
+	p.l.Close()
+	p.conWg.Wait()
+	if p.con != nil {
+		return p.con.Close()
+	}
+	return p.conErr
+}
diff --git a/internal/cmd/io_binary_test.go b/internal/cmd/io_binary_test.go
new file mode 100644
index 0000000000..8c7d384700
--- /dev/null
+++ b/internal/cmd/io_binary_test.go
@@ -0,0 +1,98 @@
+package cmd
+
+import (
+	"context"
+	"net/url"
+	"testing"
+)
+
+func Test_newBinaryCmd_Key_Value_Pair(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	type config struct {
+		name      string
+		urlString string
+		expected  string
+	}
+
+	tests := []*config{
+		{
+			name:      "use-path",
+			urlString: "binary:///executable?-key=value",
+			expected:  "/executable -key value",
+		},
+		{
+			name:      "use-host",
+			urlString: "binary://executable?-key=value",
+			expected:  "/executable -key value",
+		},
+		{
+			name:      "use-host-and-path",
+			urlString: "binary://path/to/executable?flag",
+			expected:  "/path/to/executable flag",
+		},
+	}
+
+	for _, cfg := range tests {
+		t.Run(cfg.name, func(t *testing.T) {
+			u, err := url.Parse(cfg.urlString)
+			if err != nil {
+				t.Fatalf("failed to parse url: %s", cfg.urlString)
+			}
+
+			cmd, err := newBinaryCmd(ctx, u, nil)
+			if err != nil {
+				t.Fatalf("error while creating cmd: %s", err)
+			}
+
+			if cmd.String() != cfg.expected {
+				t.Fatalf("failed to create cmd. expected: '%s', actual: '%s'", cfg.expected, cmd.String())
+			}
+		})
+	}
+}
+
+func Test_newBinaryCmd_Empty_Path(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	u, _ := url.Parse("scheme://")
+
+	cmd, err := newBinaryCmd(ctx, u, nil)
+
+	if cmd != nil {
+		t.Fatalf("cmd is not nil: %s", cmd)
+	}
+
+	if err == nil {
+		t.Fatalf("expected err to be non-nil")
+	}
+}
+
+func Test_newBinaryCmd_flags(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	urlString := "schema:///path/to/binary?foo&bar&baz"
+	uri, _ := url.Parse(urlString)
+
+	expectedPath := "/path/to/binary"
+	expectedFlags := map[string]bool{"foo": true, "bar": true, "baz": true}
+
+	cmd, err := newBinaryCmd(ctx, uri, nil)
+	if err != nil {
+		t.Fatalf("error creating binary cmd: %s", err)
+	}
+
+	if cmd.Path != expectedPath {
+		t.Fatalf("invalid cmd path: %s", cmd.Path)
+	}
+
+	for _, f := range cmd.Args[1:] {
+		if _, ok := expectedFlags[f]; !ok {
+			t.Fatalf("flag missing: '%s' in cmd: '%s'", f, cmd.String())
+		}
+	}
+}
diff --git a/test/cri-containerd/container.go b/test/cri-containerd/container.go
index a1d645819c..af4137e691 100644
--- a/test/cri-containerd/container.go
+++ b/test/cri-containerd/container.go
@@ -48,3 +48,19 @@ func removeContainer(t *testing.T, client runtime.RuntimeServiceClient, ctx cont
 		t.Fatalf("failed StopContainer request for container: %s, with: %v", containerID, err)
 	}
 }
+
+func getCreateContainerRequest(podID string, name string, image string, command []string, podConfig *runtime.PodSandboxConfig) *runtime.CreateContainerRequest {
+	return &runtime.CreateContainerRequest{
+		Config: &runtime.ContainerConfig{
+			Metadata: &runtime.ContainerMetadata{
+				Name: name,
+			},
+			Image: &runtime.ImageSpec{
+				Image: image,
+			},
+			Command: command,
+		},
+		PodSandboxId:  podID,
+		SandboxConfig: podConfig,
+	}
+}
diff --git a/test/cri-containerd/helpers/log.go b/test/cri-containerd/helpers/log.go
new file mode 100644
index 0000000000..fe8933afef
--- /dev/null
+++ b/test/cri-containerd/helpers/log.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"sync"
+
+	"github.com/Microsoft/go-winio"
+	"github.com/pkg/errors"
+)
+
+func main() {
+	if err := logContainerStdoutToFile(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
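+// logContainerStdoutToFile implements the handshake NewBinaryIO expects: dial
+// the CONTAINER_STDOUT pipe, close the CONTAINER_WAIT pipe to signal readiness,
+// then copy the container's stdout into the file named by the single
+// command-line argument.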
+func logContainerStdoutToFile() (err error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var sout, wait net.Conn
+
+	soutPipe := os.Getenv("CONTAINER_STDOUT")
+	waitPipe := os.Getenv("CONTAINER_WAIT")
+
+	if sout, err = winio.DialPipeContext(ctx, soutPipe); err != nil {
+		return errors.Wrap(err, "couldn't open stdout pipe")
+	}
+	defer sout.Close()
+
+	// The only expected argument is the output file path.
+	if len(os.Args[1:]) != 1 {
+		return errors.Errorf("expected exactly 1 argument, got: %d", len(os.Args[1:]))
+	}
+
+	var dest *os.File
+	destPath := os.Args[1]
+	if dest, err = os.Create(destPath); err != nil {
+		return errors.Wrap(err, "couldn't open destination file")
+	}
+	defer dest.Close()
+
+	if wait, err = winio.DialPipeContext(ctx, waitPipe); err != nil {
+		return errors.Wrap(err, "couldn't open wait pipe")
+	}
+	// Indicate that the logging binary is ready to receive output.
+	wait.Close()
+
+	var wg sync.WaitGroup
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		_, err = io.Copy(dest, sout)
+	}()
+	wg.Wait()
+	return
+}
diff --git a/test/cri-containerd/logging_binary_test.go b/test/cri-containerd/logging_binary_test.go
new file mode 100644
index 0000000000..1138fd58a2
--- /dev/null
+++ b/test/cri-containerd/logging_binary_test.go
@@ -0,0 +1,178 @@
+// +build functional
+
+package cri_containerd
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+)
+
+// This test requires compiling the helper logging binary, which can be found
+// at test/cri-containerd/helpers/log.go. Copy log.exe as "sample-logging-driver.exe"
+// into the ContainerPlat install directory, or set the "TEST_BINARY_ROOT" environment
+// variable; the test uses that root both to construct the logPath for the
+// CreateContainerRequest and as the location of the stdout artifacts created by the binary.
+func Test_Run_Container_With_Binary_Logger(t *testing.T) {
+	client := newTestRuntimeClient(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	logBinaryRoot := os.Getenv("TEST_BINARY_ROOT")
+	if logBinaryRoot == "" {
+		logBinaryRoot = "/ContainerPlat"
+	}
+
+	binaryPath := logBinaryRoot + "/sample-logging-driver.exe"
+
+	if _, err := os.Stat(binaryPath); os.IsNotExist(err) {
+		t.Skip("skipping: sample logging driver missing")
+	}
+
+	logPath := "binary://" + binaryPath
+
+	type config struct {
+		name             string
+		containerName    string
+		requiredFeatures []string
+		runtimeHandler   string
+		sandboxImage     string
+		containerImage   string
+		cmd              []string
+		expectedContent  string
+	}
+
+	tests := []config{
+		{
+			name:             "WCOW_Process",
+			containerName:    t.Name() + "-Container-WCOW_Process",
+			requiredFeatures: []string{featureWCOWProcess},
+			runtimeHandler:   wcowProcessRuntimeHandler,
+			sandboxImage:     imageWindowsNanoserver,
+			containerImage:   imageWindowsNanoserver,
+			cmd:              []string{"ping", "-t", "127.0.0.1"},
+			expectedContent:  "Pinging 127.0.0.1 with 32 bytes of data",
+		},
+		{
+			name:             "WCOW_Hypervisor",
+			containerName:    t.Name() + "-Container-WCOW_Hypervisor",
+			requiredFeatures: []string{featureWCOWHypervisor},
+			runtimeHandler:   wcowHypervisorRuntimeHandler,
+			sandboxImage:     imageWindowsNanoserver,
+			containerImage:   imageWindowsNanoserver,
+			cmd:              []string{"ping", "-t", "127.0.0.1"},
+			expectedContent:  "Pinging 127.0.0.1 with 32 bytes of data",
+		},
+		{
+			name:             "LCOW",
+			containerName:    t.Name() + "-Container-LCOW",
+			requiredFeatures: []string{featureLCOW},
+			runtimeHandler:   lcowRuntimeHandler,
+			sandboxImage:     imageLcowK8sPause,
+			containerImage:   imageLcowAlpine,
+			cmd:              []string{"ash", "-c", "while true; do echo 'Hello, World!'; sleep 1; done"},
+			expectedContent:  "Hello, World!",
+		},
+	}
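+
+	// Each config runs twice below: a positive pass that expects the binary
+	// logger to capture the expected container output, and a negative pass that
+	// points the logger at a nonexistent path and expects container start to fail.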
+
+	// Positive tests
+	for _, test := range tests {
+		t.Run(test.name+"_Positive", func(t *testing.T) {
+			requireFeatures(t, test.requiredFeatures...)
+
+			requiredImages := []string{test.sandboxImage, test.containerImage}
+			if test.runtimeHandler == lcowRuntimeHandler {
+				pullRequiredLcowImages(t, requiredImages)
+			} else {
+				pullRequiredImages(t, requiredImages)
+			}
+
+			podReq := getRunPodSandboxRequest(t, test.runtimeHandler)
+			podID := runPodSandbox(t, client, ctx, podReq)
+			defer removePodSandbox(t, client, ctx, podID)
+
+			logFileName := fmt.Sprintf("%s/stdout-%s.txt", logBinaryRoot, test.name)
+			conReq := getCreateContainerRequest(podID, test.containerName, test.containerImage, test.cmd, podReq.Config)
+			conReq.Config.LogPath = logPath + fmt.Sprintf("?%s", logFileName)
+
+			createAndRunContainer(t, client, ctx, conReq)
+
+			if _, err := os.Stat(logFileName); os.IsNotExist(err) {
+				t.Fatalf("log file was not created: %s", logFileName)
+			}
+			defer os.Remove(logFileName)
+
+			ok, err := assertFileContent(logFileName, test.expectedContent)
+			if err != nil {
+				t.Fatalf("failed to read log file: %s", err)
+			}
+
+			if !ok {
+				t.Fatalf("file content validation failed: %s", test.expectedContent)
+			}
+		})
+	}
+
+	// Negative tests
+	for _, test := range tests {
+		t.Run(test.name+"_Negative", func(t *testing.T) {
+			requireFeatures(t, test.requiredFeatures...)
+
+			requiredImages := []string{test.sandboxImage, test.containerImage}
+			if test.runtimeHandler == lcowRuntimeHandler {
+				pullRequiredLcowImages(t, requiredImages)
+			} else {
+				pullRequiredImages(t, requiredImages)
+			}
+
+			podReq := getRunPodSandboxRequest(t, test.runtimeHandler)
+			podID := runPodSandbox(t, client, ctx, podReq)
+			defer removePodSandbox(t, client, ctx, podID)
+
+			nonExistentPath := "/does/not/exist/log.txt"
+			conReq := getCreateContainerRequest(podID, test.containerName, test.containerImage, test.cmd, podReq.Config)
+			conReq.Config.LogPath = logPath + fmt.Sprintf("?%s", nonExistentPath)
+
+			containerID := createContainer(t, client, ctx, conReq)
+			defer removeContainer(t, client, ctx, containerID)
+
+			// This should fail, since the filepath doesn't exist
+			_, err := client.StartContainer(ctx, &runtime.StartContainerRequest{
+				ContainerId: containerID,
+			})
+			if err == nil {
+				t.Fatal("container start should fail")
+			}
+
+			if !strings.Contains(err.Error(), "failed to start binary logger") {
+				t.Fatalf("expected 'failed to start binary logger' error, got: %s", err)
+			}
+		})
+	}
+}
+
+func createAndRunContainer(t *testing.T, client runtime.RuntimeServiceClient, ctx context.Context, conReq *runtime.CreateContainerRequest) {
+	containerID := createContainer(t, client, ctx, conReq)
+	defer removeContainer(t, client, ctx, containerID)
+
+	startContainer(t, client, ctx, containerID)
+	defer stopContainer(t, client, ctx, containerID)
+
+	// Let stdio kick in
+	time.Sleep(time.Second * 1)
+}
+
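+// assertFileContent reports whether the file at path contains content as a substring.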
+func assertFileContent(path string, content string) (bool, error) {
+	fileContent, err := ioutil.ReadFile(path)
+	if err != nil {
+		return false, err
+	}
+
+	return strings.Contains(string(fileContent), content), nil
+}
diff --git a/test/cri-containerd/runpodsandbox_test.go b/test/cri-containerd/runpodsandbox_test.go
index 77dbd262f8..41aa2f1296 100644
--- a/test/cri-containerd/runpodsandbox_test.go
+++ b/test/cri-containerd/runpodsandbox_test.go
@@ -1019,21 +1019,9 @@ func createSandboxContainerAndExecForCustomScratch(t *testing.T, annotations map

 func createContainerInSandbox(t *testing.T, client runtime.RuntimeServiceClient, ctx context.Context, podId, containerName, imageName string, command []string, annotations map[string]string, mounts []*runtime.Mount, podConfig *runtime.PodSandboxConfig) string {
-	cRequest := &runtime.CreateContainerRequest{
-		Config: &runtime.ContainerConfig{
-			Metadata: &runtime.ContainerMetadata{
-				Name: containerName,
-			},
-			Image: &runtime.ImageSpec{
-				Image: imageName,
-			},
-			Command:     command,
-			Annotations: annotations,
-			Mounts:      mounts,
-		},
-		PodSandboxId:  podId,
-		SandboxConfig: podConfig,
-	}
+	cRequest := getCreateContainerRequest(podId, containerName, imageName, command, podConfig)
+	cRequest.Config.Annotations = annotations
+	cRequest.Config.Mounts = mounts

 	containerID := createContainer(t, client, ctx, cRequest)