diff --git a/cmd/containerd-shim-runhcs-v1/pod.go b/cmd/containerd-shim-runhcs-v1/pod.go
index 687dba1179..bf9dfa930b 100644
--- a/cmd/containerd-shim-runhcs-v1/pod.go
+++ b/cmd/containerd-shim-runhcs-v1/pod.go
@@ -83,6 +83,11 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
 	owner := filepath.Base(os.Args[0])
 	isWCOW := oci.IsWCOW(s)
 
+	p := pod{
+		events: events,
+		id:     req.ID,
+	}
+
 	var parent *uvm.UtilityVM
 	if oci.IsIsolated(s) {
 		// Create the UVM parent
@@ -125,9 +130,33 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
 			parent.Close()
 			return nil, err
 		}
+	} else if oci.IsJobContainer(s) {
+		// If we're making a job container, fake a task (i.e. reuse the wcowPodSandbox logic)
+		p.sandboxTask = newWcowPodSandboxTask(ctx, events, req.ID, req.Bundle, parent, "")
+		if err := events.publishEvent(
+			ctx,
+			runtime.TaskCreateEventTopic,
+			&eventstypes.TaskCreate{
+				ContainerID: req.ID,
+				Bundle:      req.Bundle,
+				Rootfs:      req.Rootfs,
+				IO: &eventstypes.TaskIO{
+					Stdin:    req.Stdin,
+					Stdout:   req.Stdout,
+					Stderr:   req.Stderr,
+					Terminal: req.Terminal,
+				},
+				Checkpoint: "",
+				Pid:        0,
+			}); err != nil {
+			return nil, err
+		}
+		p.jobContainer = true
+		return &p, nil
 	} else if !isWCOW {
 		return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "oci spec does not contain WCOW or LCOW spec")
 	}
+
 	defer func() {
 		// clean up the uvm if we fail any further operations
 		if err != nil && parent != nil {
@@ -135,12 +164,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
 		}
 	}()
 
-	p := pod{
-		events: events,
-		id:     req.ID,
-		host:   parent,
-	}
-
+	p.host = parent
 	if parent != nil {
 		cid := req.ID
 		if id, ok := s.Annotations[oci.AnnotationNcproxyContainerID]; ok {
@@ -232,6 +256,11 @@ type pod struct {
 	// It MUST be treated as read only in the lifetime of the pod.
 	host *uvm.UtilityVM
 
+	// jobContainer specifies whether this pod is for WCOW job containers only.
+	//
+	// It MUST be treated as read only in the lifetime of the pod.
+	jobContainer bool
+
 	workloadTasks sync.Map
 }
 
@@ -263,6 +292,17 @@ func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *sp
 		return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "task with id: '%s' already exists id pod: '%s'", req.ID, p.id)
 	}
 
+	if p.jobContainer {
+		// This is a short circuit to make sure that all containers in a pod will have
+		// the same IP address/be added to the same compartment.
+		//
+		// OS work is needed to support this scenario, so for now we need to
+		// block this.
+		if !oci.IsJobContainer(s) {
+			return nil, errors.New("cannot create a normal process isolated container if the pod sandbox is a job container")
+		}
+	}
+
 	ct, sid, err := oci.GetSandboxTypeAndID(s.Annotations)
 	if err != nil {
 		return nil, err
diff --git a/cmd/containerd-shim-runhcs-v1/task_hcs.go b/cmd/containerd-shim-runhcs-v1/task_hcs.go
index eaef67b2ab..dee9758a46 100644
--- a/cmd/containerd-shim-runhcs-v1/task_hcs.go
+++ b/cmd/containerd-shim-runhcs-v1/task_hcs.go
@@ -29,6 +29,7 @@ import (
 	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
 	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
 	"github.com/Microsoft/hcsshim/internal/hcsoci"
+	"github.com/Microsoft/hcsshim/internal/jobcontainers"
 	"github.com/Microsoft/hcsshim/internal/log"
 	"github.com/Microsoft/hcsshim/internal/oci"
 	"github.com/Microsoft/hcsshim/internal/processorinfo"
@@ -113,6 +114,39 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.Creat
 	return shim, nil
 }
 
+// createContainer is a generic call that returns either a process/hypervisor-isolated container or a job container,
+// based on what is set in the OCI spec.
+func createContainer(ctx context.Context, id, owner, netNS string, s *specs.Spec, parent *uvm.UtilityVM, shimOpts *runhcsopts.Options) (cow.Container, *resources.Resources, error) {
+	var (
+		err       error
+		container cow.Container
+		resources *resources.Resources
+	)
+
+	if oci.IsJobContainer(s) {
+		container, resources, err = jobcontainers.Create(ctx, id, s)
+		if err != nil {
+			return nil, nil, err
+		}
+	} else {
+		opts := &hcsoci.CreateOptions{
+			ID:               id,
+			Owner:            owner,
+			Spec:             s,
+			HostingSystem:    parent,
+			NetworkNamespace: netNS,
+		}
+		if shimOpts != nil {
+			opts.ScaleCPULimitsToSandbox = shimOpts.ScaleCpuLimitsToSandbox
+		}
+		container, resources, err = hcsoci.CreateContainer(ctx, opts)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	return container, resources, nil
+}
+
 // newHcsTask creates a container within `parent` and its init exec process in
 // the `shimExecCreated` state and returns the task that tracks its lifetime.
 //
@@ -152,19 +186,7 @@ func newHcsTask(
 		shimOpts = v.(*runhcsopts.Options)
 	}
 
-	opts := hcsoci.CreateOptions{
-		ID:               req.ID,
-		Owner:            owner,
-		Spec:             s,
-		HostingSystem:    parent,
-		NetworkNamespace: netNS,
-	}
-
-	if shimOpts != nil {
-		opts.ScaleCPULimitsToSandbox = shimOpts.ScaleCpuLimitsToSandbox
-	}
-
-	system, resources, err := hcsoci.CreateContainer(ctx, &opts)
+	container, resources, err := createContainer(ctx, req.ID, owner, netNS, s, parent, shimOpts)
 	if err != nil {
 		return nil, err
 	}
@@ -173,7 +195,7 @@ func newHcsTask(
 		events:   events,
 		id:       req.ID,
 		isWCOW:   oci.IsWCOW(s),
-		c:        system,
+		c:        container,
 		cr:       resources,
 		ownsHost: ownsParent,
 		host:     parent,
@@ -186,7 +208,7 @@ func newHcsTask(
 		events,
 		req.ID,
 		parent,
-		system,
+		container,
 		req.ID,
 		req.Bundle,
 		ht.isWCOW,
diff --git a/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go b/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go
index 0877b51ae2..e8a5174945 100644
--- a/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go
+++ b/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go
@@ -284,6 +284,9 @@ func (wpst *wcowPodSandboxTask) Share(ctx context.Context, req *shimdiag.ShareRe
 func (wpst *wcowPodSandboxTask) Stats(ctx context.Context) (*stats.Statistics, error) {
 	stats := &stats.Statistics{}
 
+	if wpst.host == nil {
+		return stats, nil
+	}
 	vmStats, err := wpst.host.Stats(ctx)
 	if err != nil && !isStatsNotFound(err) {
 		return nil, err
diff --git a/internal/hcsoci/resources_lcow.go b/internal/hcsoci/resources_lcow.go
index 2e634c6e2a..c024605f73 100644
--- a/internal/hcsoci/resources_lcow.go
+++ b/internal/hcsoci/resources_lcow.go
@@ -42,12 +42,12 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
 	containerRootInUVM := r.ContainerRootInUVM()
 	if coi.Spec.Windows != nil && len(coi.Spec.Windows.LayerFolders) > 0 {
 		log.G(ctx).Debug("hcsshim::allocateLinuxResources mounting storage")
-		rootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, coi.HostingSystem)
+		rootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem)
 		if err != nil {
 			return errors.Wrap(err, "failed to mount container storage")
 		}
 		coi.Spec.Root.Path = rootPath
-		layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, isSandbox)
+		layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, "", isSandbox)
 		r.SetLayers(layers)
 	} else if coi.Spec.Root.Path != "" {
 		// This is the "Plan 9" root filesystem.
diff --git a/internal/hcsoci/resources_wcow.go b/internal/hcsoci/resources_wcow.go
index 46599be037..7f3c67140b 100644
--- a/internal/hcsoci/resources_wcow.go
+++ b/internal/hcsoci/resources_wcow.go
@@ -52,12 +52,12 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r
 	if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) {
 		log.G(ctx).Debug("hcsshim::allocateWindowsResources mounting storage")
 		containerRootInUVM := r.ContainerRootInUVM()
-		containerRootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, coi.HostingSystem)
+		containerRootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem)
 		if err != nil {
 			return errors.Wrap(err, "failed to mount container storage")
 		}
 		coi.Spec.Root.Path = containerRootPath
-		layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, isSandbox)
+		layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, "", isSandbox)
 		r.SetLayers(layers)
 	}
 
diff --git a/internal/jobcontainers/jobcontainer.go b/internal/jobcontainers/jobcontainer.go
index 80ae9e8501..88fbfb05e8 100644
--- a/internal/jobcontainers/jobcontainer.go
+++ b/internal/jobcontainers/jobcontainer.go
@@ -21,10 +21,10 @@ import (
 	"github.com/Microsoft/hcsshim/internal/layers"
 	"github.com/Microsoft/hcsshim/internal/log"
 	"github.com/Microsoft/hcsshim/internal/queue"
+	"github.com/Microsoft/hcsshim/internal/resources"
 	"github.com/Microsoft/hcsshim/internal/winapi"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/windows"
 )
 
@@ -66,7 +66,6 @@ type JobContainer struct {
 	spec         *specs.Spec          // OCI spec used to create the container
 	job          *jobobject.JobObject // Object representing the job object the container owns
 	sandboxMount string               // Path to where the sandbox is mounted on the host
-	m            sync.Mutex
 	closedWaitOnce sync.Once
 	init           initProc
 	startTimestamp time.Time
@@ -89,33 +88,21 @@ func newJobContainer(id string, s *specs.Spec) *JobContainer {
 }
 
 // Create creates a new JobContainer from `s`.
-func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, err error) {
+func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ *resources.Resources, err error) {
 	log.G(ctx).WithField("id", id).Debug("Creating job container")
 
 	if s == nil {
-		return nil, errors.New("Spec must be supplied")
+		return nil, nil, errors.New("Spec must be supplied")
 	}
 
 	if id == "" {
 		g, err := guid.NewV4()
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		id = g.String()
 	}
 
-	if err := mountLayers(ctx, s); err != nil {
-		return nil, errors.Wrap(err, "failed to mount container layers")
-	}
-
-	volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
-	if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil {
-		return nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, s.Root.Path)
-	}
-	if s.Root.Path[len(s.Root.Path)-1] != '\\' {
-		s.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat
-	}
-
 	container := newJobContainer(id, s)
 
 	// Create the job object all processes will run in.
@@ -125,52 +112,50 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, err
 	}
 	job, err := jobobject.Create(ctx, options)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to create job object")
+		return nil, nil, errors.Wrap(err, "failed to create job object")
 	}
 
 	// Parity with how we handle process isolated containers. We set the same flag which
 	// behaves the same way for a silo.
 	if err := job.SetTerminateOnLastHandleClose(); err != nil {
-		return nil, errors.Wrap(err, "failed to set terminate on last handle close on job container")
+		return nil, nil, errors.Wrap(err, "failed to set terminate on last handle close on job container")
 	}
 	container.job = job
 
-	var path string
+	r := resources.NewContainerResources(id)
 	defer func() {
 		if err != nil {
 			container.Close()
-			if path != "" {
-				_ = removeSandboxMountPoint(ctx, path)
-			}
+			_ = resources.ReleaseResources(ctx, r, nil, true)
 		}
 	}()
 
-	limits, err := specToLimits(ctx, id, s)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to convert OCI spec to job object limits")
+	sandboxPath := fmt.Sprintf(sandboxMountFormat, id)
+	if err := mountLayers(ctx, s, sandboxPath); err != nil {
+		return nil, nil, errors.Wrap(err, "failed to mount container layers")
 	}
+	container.sandboxMount = sandboxPath
 
-	// Set resource limits on the job object based off of oci spec.
-	if err := job.SetResourceLimits(limits); err != nil {
-		return nil, errors.Wrap(err, "failed to set resource limits")
+	layers := layers.NewImageLayers(nil, "", s.Windows.LayerFolders, sandboxPath, false)
+	r.SetLayers(layers)
+
+	volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
+	if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil {
+		return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, s.Root.Path)
 	}
 
-	// Setup directory sandbox volume will be mounted
-	sandboxPath := fmt.Sprintf(sandboxMountFormat, id)
-	if _, err := os.Stat(sandboxPath); os.IsNotExist(err) {
-		if err := os.MkdirAll(sandboxPath, 0777); err != nil {
-			return nil, errors.Wrap(err, "failed to create mounted folder")
-		}
+	limits, err := specToLimits(ctx, id, s)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to convert OCI spec to job object limits")
 	}
-	path = sandboxPath
 
-	if err := mountSandboxVolume(ctx, path, s.Root.Path); err != nil {
-		return nil, errors.Wrap(err, "failed to bind payload directory on host")
+	// Set resource limits on the job object based on the OCI spec.
+	if err := job.SetResourceLimits(limits); err != nil {
+		return nil, nil, errors.Wrap(err, "failed to set resource limits")
 	}
-	container.sandboxMount = path
 
 	go container.waitBackground(ctx)
-	return container, nil
+	return container, r, nil
 }
 
 // CreateProcess creates a process on the host, starts it, adds it to the containers
@@ -283,29 +268,6 @@ func (c *JobContainer) Modify(ctx context.Context, config interface{}) (err erro
 	return errors.New("modify not supported for job containers")
 }
 
-// Release unmounts all of the container layers. Safe to call multiple times, if no storage
-// is mounted this call will just return nil.
-func (c *JobContainer) Release(ctx context.Context) error {
-	c.m.Lock()
-	defer c.m.Unlock()
-
-	log.G(ctx).WithFields(logrus.Fields{
-		"id":   c.id,
-		"path": c.sandboxMount,
-	}).Warn("removing sandbox volume mount")
-
-	if c.sandboxMount != "" {
-		if err := removeSandboxMountPoint(ctx, c.sandboxMount); err != nil {
-			return errors.Wrap(err, "failed to remove sandbox volume mount path")
-		}
-		if err := layers.UnmountContainerLayers(ctx, c.spec.Windows.LayerFolders, "", nil, layers.UnmountOperationAll); err != nil {
-			return errors.Wrap(err, "failed to unmount container layers")
-		}
-		c.sandboxMount = ""
-	}
-	return nil
-}
-
 // Start starts the container. There's nothing to "start" for job containers, so this just
 // sets the start timestamp.
 func (c *JobContainer) Start(ctx context.Context) error {
@@ -484,7 +446,7 @@ func (c *JobContainer) waitBackground(ctx context.Context) {
 	// them to exit.
 	<-c.init.proc.waitBlock
 
-	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()
 	if err := c.Shutdown(ctx); err != nil {
 		_ = c.Terminate(ctx)
diff --git a/internal/jobcontainers/storage.go b/internal/jobcontainers/storage.go
index 270d44def2..7c4fe4b168 100644
--- a/internal/jobcontainers/storage.go
+++ b/internal/jobcontainers/storage.go
@@ -10,14 +10,12 @@ import (
 	"github.com/Microsoft/hcsshim/internal/wclayer"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/windows"
 )
 
 // Trailing backslash required for SetVolumeMountPoint and DeleteVolumeMountPoint
 const sandboxMountFormat = `C:\C\%s\`
 
-func mountLayers(ctx context.Context, s *specs.Spec) error {
+func mountLayers(ctx context.Context, s *specs.Spec, volumeMountPath string) error {
 	if s == nil || s.Windows == nil || s.Windows.LayerFolders == nil {
 		return errors.New("field 'Spec.Windows.Layerfolders' is not populated")
 	}
@@ -43,7 +41,7 @@ func mountLayers(ctx context.Context, s *specs.Spec) error {
 
 	if s.Root.Path == "" {
 		log.G(ctx).Debug("mounting job container storage")
-		containerRootPath, err := layers.MountContainerLayers(ctx, s.Windows.LayerFolders, "", nil)
+		containerRootPath, err := layers.MountContainerLayers(ctx, s.Windows.LayerFolders, "", volumeMountPath, nil)
 		if err != nil {
 			return errors.Wrap(err, "failed to mount container storage")
 		}
@@ -51,43 +49,3 @@ func mountLayers(ctx context.Context, s *specs.Spec) error {
 		s.Root.Path = containerRootPath
 	}
 	return nil
 }
-
-// Mount the sandbox vhd to a user friendly path.
-func mountSandboxVolume(ctx context.Context, hostPath, volumeName string) (err error) {
-	log.G(ctx).WithFields(logrus.Fields{
-		"hostpath":   hostPath,
-		"volumeName": volumeName,
-	}).Debug("mounting sandbox volume for job container")
-
-	if _, err := os.Stat(hostPath); os.IsNotExist(err) {
-		if err := os.MkdirAll(hostPath, 0777); err != nil {
-			return err
-		}
-	}
-
-	defer func() {
-		if err != nil {
-			os.RemoveAll(hostPath)
-		}
-	}()
-
-	if err = windows.SetVolumeMountPoint(windows.StringToUTF16Ptr(hostPath), windows.StringToUTF16Ptr(volumeName)); err != nil {
-		return errors.Wrapf(err, "failed to mount sandbox volume to %s on host", hostPath)
-	}
-	return nil
-}
-
-// Remove volume mount point. And remove folder afterwards.
-func removeSandboxMountPoint(ctx context.Context, hostPath string) error {
-	log.G(ctx).WithFields(logrus.Fields{
-		"hostpath": hostPath,
-	}).Debug("mounting sandbox volume for job container")
-
-	if err := windows.DeleteVolumeMountPoint(windows.StringToUTF16Ptr(hostPath)); err != nil {
-		return errors.Wrap(err, "failed to delete sandbox volume mount point")
-	}
-	if err := os.Remove(hostPath); err != nil {
-		return errors.Wrapf(err, "failed to remove sandbox mounted folder path %q", hostPath)
-	}
-	return nil
-}
diff --git a/internal/layers/layers.go b/internal/layers/layers.go
index b5ab18a7ca..67ba8125fe 100644
--- a/internal/layers/layers.go
+++ b/internal/layers/layers.go
@@ -6,6 +6,7 @@ package layers
 import (
 	"context"
 	"fmt"
+	"os"
 	"path/filepath"
 
 	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
@@ -16,12 +17,14 @@ import (
 	"github.com/Microsoft/hcsshim/internal/wclayer"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/windows"
 )
 
 // ImageLayers contains all the layers for an image.
 type ImageLayers struct {
 	vm                 *uvm.UtilityVM
 	containerRootInUVM string
+	volumeMountPath    string
 	layers             []string
 	// In some instances we may want to avoid cleaning up the image layers, such as when tearing
 	// down a sandbox container since the UVM will be torn down shortly after and the resources
 	skipCleanup bool
 }
 
-func NewImageLayers(vm *uvm.UtilityVM, containerRootInUVM string, layers []string, skipCleanup bool) *ImageLayers {
+func NewImageLayers(vm *uvm.UtilityVM, containerRootInUVM string, layers []string, volumeMountPath string, skipCleanup bool) *ImageLayers {
 	return &ImageLayers{
 		vm:                 vm,
 		containerRootInUVM: containerRootInUVM,
 		layers:             layers,
+		volumeMountPath:    volumeMountPath,
 		skipCleanup:        skipCleanup,
 	}
 }
@@ -51,7 +55,7 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error {
 	if layers.vm != nil {
 		crp = containerRootfsPath(layers.vm, layers.containerRootInUVM)
 	}
-	err := UnmountContainerLayers(ctx, layers.layers, crp, layers.vm, op)
+	err := UnmountContainerLayers(ctx, layers.layers, crp, layers.volumeMountPath, layers.vm, op)
 	if err != nil {
 		return err
 	}
@@ -67,9 +71,11 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error {
 // v2: Xenon WCOW: Returns a CombinedLayersV2 structure where ContainerRootPath is a folder
 //     inside the utility VM which is a GUID mapping of the scratch folder. Each
 //     of the layers are the VSMB locations where the read-only layers are mounted.
+// Job container: Returns the mount path on the host as a volume GUID, with the volume mounted on
+//     the host at `volumeMountPath`.
 //
 // TODO dcantah: Keep better track of the layers that are added, don't simply discard the SCSI, VSMB, etc. resource types gotten inside.
-func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot string, uvm *uvmpkg.UtilityVM) (_ string, err error) {
+func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot, volumeMountPath string, uvm *uvmpkg.UtilityVM) (_ string, err error) {
 	log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::mountContainerLayers")
 
 	if uvm == nil {
@@ -100,6 +106,14 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 		if err != nil {
 			return "", err
 		}
+
+		// Mount the volume to a directory on the host if requested. This is the case for job containers.
+		if volumeMountPath != "" {
+			if err := mountSandboxVolume(ctx, volumeMountPath, mountPath); err != nil {
+				return "", err
+			}
+		}
+
 		return mountPath, nil
 	}
@@ -276,7 +290,7 @@ const (
 )
 
 // UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting
-func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath string, uvm *uvmpkg.UtilityVM, op UnmountOperation) error {
+func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, uvm *uvmpkg.UtilityVM, op UnmountOperation) error {
 	log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::unmountContainerLayers")
 	if uvm == nil {
 		// Must be an argon - folders are mounted on the host
@@ -286,6 +300,14 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
 		if len(layerFolders) < 1 {
 			return errors.New("need at least one layer for Unmount")
 		}
+
+		// Remove the mount point if there is one. This is the case for job containers.
+		if volumeMountPath != "" {
+			if err := removeSandboxMountPoint(ctx, volumeMountPath); err != nil {
+				return err
+			}
+		}
+
 		path := layerFolders[len(layerFolders)-1]
 		if err := wclayer.UnprepareLayer(ctx, path); err != nil {
 			return err
@@ -398,3 +420,48 @@ func getScratchVHDPath(layerFolders []string) (string, error) {
 	}
 	return hostPath, nil
 }
+
+// Mount the sandbox vhd to a user friendly path.
+func mountSandboxVolume(ctx context.Context, hostPath, volumeName string) (err error) {
+	log.G(ctx).WithFields(logrus.Fields{
+		"hostpath":   hostPath,
+		"volumeName": volumeName,
+	}).Debug("mounting volume for container")
+
+	if _, err := os.Stat(hostPath); os.IsNotExist(err) {
+		if err := os.MkdirAll(hostPath, 0777); err != nil {
+			return err
+		}
+	}
+
+	defer func() {
+		if err != nil {
+			os.RemoveAll(hostPath)
+		}
+	}()
+
+	// Make sure volumeName ends with a trailing slash as required.
+	if volumeName[len(volumeName)-1] != '\\' {
+		volumeName += `\` // Be nice to clients and make sure it's well-formed for back-compat
+	}
+
+	if err = windows.SetVolumeMountPoint(windows.StringToUTF16Ptr(hostPath), windows.StringToUTF16Ptr(volumeName)); err != nil {
+		return errors.Wrapf(err, "failed to mount sandbox volume to %s on host", hostPath)
+	}
+	return nil
+}
+
+// Remove the volume mount point and remove the folder afterwards.
+func removeSandboxMountPoint(ctx context.Context, hostPath string) error {
+	log.G(ctx).WithFields(logrus.Fields{
+		"hostpath": hostPath,
+	}).Debug("removing volume mount point for container")
+
+	if err := windows.DeleteVolumeMountPoint(windows.StringToUTF16Ptr(hostPath)); err != nil {
+		return errors.Wrap(err, "failed to delete sandbox volume mount point")
+	}
+	if err := os.Remove(hostPath); err != nil {
+		return errors.Wrapf(err, "failed to remove sandbox mounted folder path %q", hostPath)
+	}
+	return nil
+}
diff --git a/internal/oci/util.go b/internal/oci/util.go
index e57b52ae6c..85f52016ff 100644
--- a/internal/oci/util.go
+++ b/internal/oci/util.go
@@ -16,3 +16,8 @@ func IsWCOW(s *specs.Spec) bool {
 func IsIsolated(s *specs.Spec) bool {
 	return IsLCOW(s) || (s.Windows != nil && s.Windows.HyperV != nil)
 }
+
+// IsJobContainer checks if `s` is asking for a Windows job container.
+func IsJobContainer(s *specs.Spec) bool {
+	return s.Annotations[AnnotationHostProcessContainer] == "true"
+}
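For reviewers, a minimal sketch of how the pieces above compose once the patch is applied. This is illustrative only and not part of the change: it must live inside the hcsshim module (the internal packages cannot be imported from outside it), it assumes oci.AnnotationHostProcessContainer is the annotation key that IsJobContainer consults per internal/oci/util.go above, and the id and spec contents are placeholders.

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/jobcontainers"
	"github.com/Microsoft/hcsshim/internal/oci"
	"github.com/Microsoft/hcsshim/internal/resources"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func createJobContainerSketch(ctx context.Context) error {
	// A spec marked as a host process (job) container. The shim normally
	// receives this from containerd; Root and Windows.LayerFolders are
	// elided here and would be filled in from the request.
	s := &specs.Spec{
		Annotations: map[string]string{
			oci.AnnotationHostProcessContainer: "true", // key assumed, see internal/oci/util.go
		},
	}

	if !oci.IsJobContainer(s) {
		return nil // a non-job spec would take the hcsoci.CreateContainer branch instead
	}

	// This is the branch createContainer (task_hcs.go) now takes: Create also
	// returns the *resources.Resources tracking the mounted layers and the
	// sandbox volume mount, so the caller owns the teardown.
	c, r, err := jobcontainers.Create(ctx, "example-id", s)
	if err != nil {
		return err
	}
	// Released immediately here for brevity; a real caller would do this when
	// the task is deleted.
	defer func() {
		_ = resources.ReleaseResources(ctx, r, nil, true)
		_ = c.Close()
	}()
	return nil
}

Note the design choice this illustrates: with Create returning a resources.Resources, cleanup is centralized in resources.ReleaseResources instead of the bespoke JobContainer.Release method this patch removes.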