diff --git a/internal/guestrequest/types.go b/internal/guestrequest/types.go index afb7b5ad06..67f56e28f8 100644 --- a/internal/guestrequest/types.go +++ b/internal/guestrequest/types.go @@ -25,10 +25,11 @@ type CombinedLayers struct { // SCSI. Scratch space for remote file-system commands, or R/W layer for containers type LCOWMappedVirtualDisk struct { - MountPath string `json:"MountPath,omitempty"` - Lun uint8 `json:"Lun,omitempty"` - Controller uint8 `json:"Controller,omitempty"` - ReadOnly bool `json:"ReadOnly,omitempty"` + MountPath string `json:"MountPath,omitempty"` + Lun uint8 `json:"Lun,omitempty"` + Controller uint8 `json:"Controller,omitempty"` + ReadOnly bool `json:"ReadOnly,omitempty"` + Options []string `json:"Options,omitempty"` } type WCOWMappedVirtualDisk struct { diff --git a/internal/hcsoci/resources_lcow.go b/internal/hcsoci/resources_lcow.go index 66752d18a1..2e634c6e2a 100644 --- a/internal/hcsoci/resources_lcow.go +++ b/internal/hcsoci/resources_lcow.go @@ -89,11 +89,12 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * break } } + l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) if mount.Type == "physical-disk" { l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount") uvmPathForShare = fmt.Sprintf(uvm.LCOWGlobalMountPrefix, coi.HostingSystem.UVMMountCounter()) - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly) + scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly, mount.Options) if err != nil { return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) } @@ -107,7 +108,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * // if the scsi device is already attached then we take the uvm path that the function below returns // that is where it was previously mounted in UVM - scsiMount, err := coi.HostingSystem.AddSCSI(ctx, hostPath, uvmPathForShare, readOnly, uvm.VMAccessTypeIndividual) + scsiMount, err := coi.HostingSystem.AddSCSI(ctx, hostPath, uvmPathForShare, readOnly, mount.Options, uvm.VMAccessTypeIndividual) if err != nil { return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) } @@ -136,6 +137,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * uvmPathForFile = path.Join(uvmPathForShare, fileName) } l.Debug("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount") + share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForShare, readOnly, restrictAccess, allowedNames) if err != nil { return errors.Wrapf(err, "adding plan9 mount %+v", mount) @@ -172,7 +174,8 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * // use lcowNvidiaMountPath since we only support nvidia gpus right now // must use scsi here since DDA'ing a hyper-v pci device is not supported on VMs that have ANY virtual memory // gpuvhd must be granted VM Group access. 
- scsiMount, err := coi.HostingSystem.AddSCSI(ctx, gpuSupportVhdPath, uvm.LCOWNvidiaMountPath, true, uvm.VMAccessTypeNoop) + options := []string{"ro"} + scsiMount, err := coi.HostingSystem.AddSCSI(ctx, gpuSupportVhdPath, uvm.LCOWNvidiaMountPath, true, options, uvm.VMAccessTypeNoop) if err != nil { return errors.Wrapf(err, "failed to add scsi device %s in the UVM %s at %s", gpuSupportVhdPath, coi.HostingSystem.ID(), uvm.LCOWNvidiaMountPath) } diff --git a/internal/hcsoci/resources_wcow.go b/internal/hcsoci/resources_wcow.go index f2984a423b..79443290a5 100644 --- a/internal/hcsoci/resources_wcow.go +++ b/internal/hcsoci/resources_wcow.go @@ -135,7 +135,7 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) if mount.Type == "physical-disk" { l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly) + scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly, mount.Options) if err != nil { return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) } @@ -143,7 +143,7 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R r.Add(scsiMount) } else if mount.Type == "virtual-disk" { l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSI(ctx, mount.Source, uvmPath, readOnly, uvm.VMAccessTypeIndividual) + scsiMount, err := coi.HostingSystem.AddSCSI(ctx, mount.Source, uvmPath, readOnly, mount.Options, uvm.VMAccessTypeIndividual) if err != nil { return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) } diff --git a/internal/layers/layers.go b/internal/layers/layers.go index f3f4e77edd..123b87ebb1 100644 --- a/internal/layers/layers.go +++ b/internal/layers/layers.go @@ -161,7 +161,8 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot } log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD") - scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, uvmpkg.VMAccessTypeIndividual) + var options []string + scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvmpkg.VMAccessTypeIndividual) if err != nil { return "", fmt.Errorf("failed to add SCSI scratch VHD: %s", err) } @@ -223,8 +224,9 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) } } + options := []string{"ro"} uvmPath = fmt.Sprintf(uvmpkg.LCOWGlobalMountPrefix, uvm.UVMMountCounter()) - sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, uvmpkg.VMAccessTypeNoop) + sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvmpkg.VMAccessTypeNoop) if err != nil { return "", fmt.Errorf("failed to add SCSI layer: %s", err) } diff --git a/internal/lcow/disk.go b/internal/lcow/disk.go index a208cb7231..c7af7cf6ce 100644 --- a/internal/lcow/disk.go +++ b/internal/lcow/disk.go @@ -28,7 +28,8 @@ func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) er "dest": destPath, }).Debug("lcow::FormatDisk opts") - scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false) // No destination as not formatted + var options []string + scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false, options) // No destination as not formatted if err != nil { return err } diff --git a/internal/lcow/scratch.go 
b/internal/lcow/scratch.go index 8d1e337b52..12884b4745 100644 --- a/internal/lcow/scratch.go +++ b/internal/lcow/scratch.go @@ -66,7 +66,8 @@ func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string, return fmt.Errorf("failed to create VHDx %s: %s", destFile, err) } - scsi, err := lcowUVM.AddSCSI(ctx, destFile, "", false, uvm.VMAccessTypeIndividual) // No destination as not formatted + var options []string + scsi, err := lcowUVM.AddSCSI(ctx, destFile, "", false, options, uvm.VMAccessTypeIndividual) // No destination as not formatted if err != nil { return err } diff --git a/internal/uvm/scsi.go b/internal/uvm/scsi.go index a183204cb1..563891ea0a 100644 --- a/internal/uvm/scsi.go +++ b/internal/uvm/scsi.go @@ -221,9 +221,12 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { // // `readOnly` set to `true` if the vhd/vhdx should be attached read only. // +// `guestOptions` is a slice that contains optional information to pass +// to the guest service +// // `vmAccess` indicates what access to grant the vm for the hostpath -func (uvm *UtilityVM) AddSCSI(ctx context.Context, hostPath string, uvmPath string, readOnly bool, vmAccess VMAccessType) (*SCSIMount, error) { - return uvm.addSCSIActual(ctx, hostPath, uvmPath, "VirtualDisk", readOnly, vmAccess) +func (uvm *UtilityVM) AddSCSI(ctx context.Context, hostPath string, uvmPath string, readOnly bool, guestOptions []string, vmAccess VMAccessType) (*SCSIMount, error) { + return uvm.addSCSIActual(ctx, hostPath, uvmPath, "VirtualDisk", readOnly, guestOptions, vmAccess) } // AddSCSIPhysicalDisk attaches a physical disk from the host directly to the @@ -234,8 +237,11 @@ func (uvm *UtilityVM) AddSCSI(ctx context.Context, hostPath string, uvmPath stri // `uvmPath` is optional if a guest mount is not requested. // // `readOnly` set to `true` if the physical disk should be attached read only. -func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool) (*SCSIMount, error) { - return uvm.addSCSIActual(ctx, hostPath, uvmPath, "PassThru", readOnly, VMAccessTypeIndividual) +// +// `guestOptions` is a slice that contains optional information to pass +// to the guest service +func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool, guestOptions []string) (*SCSIMount, error) { + return uvm.addSCSIActual(ctx, hostPath, uvmPath, "PassThru", readOnly, guestOptions, VMAccessTypeIndividual) } // addSCSIActual is the implementation behind the external functions AddSCSI and @@ -249,10 +255,13 @@ func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath // // `readOnly` indicates the attachment should be added read only. 
// +// `guestOptions` is a slice that contains optional information to pass +// to the guest service +// // `vmAccess` indicates what access to grant the vm for the hostpath // // Returns result from calling modify with the given scsi mount -func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, attachmentType string, readOnly bool, vmAccess VMAccessType) (sm *SCSIMount, err error) { +func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, attachmentType string, readOnly bool, guestOptions []string, vmAccess VMAccessType) (sm *SCSIMount, err error) { sm, existed, err := uvm.allocateSCSIMount(ctx, readOnly, hostPath, uvmPath, attachmentType, vmAccess) if err != nil { return nil, err @@ -304,6 +313,7 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, atta Lun: uint8(sm.LUN), Controller: uint8(sm.Controller), ReadOnly: readOnly, + Options: guestOptions, } } SCSIModification.GuestRequest = guestReq diff --git a/test/cri-containerd/runpodsandbox_test.go b/test/cri-containerd/runpodsandbox_test.go index 8a50668537..9758b194ae 100644 --- a/test/cri-containerd/runpodsandbox_test.go +++ b/test/cri-containerd/runpodsandbox_test.go @@ -913,6 +913,132 @@ func Test_RunPodSandbox_MultipleContainersSameVhd_LCOW(t *testing.T) { } } +func Test_RunPodSandbox_MultipleContainersSameVhd_RShared_LCOW(t *testing.T) { + requireFeatures(t, featureLCOW) + + pullRequiredLcowImages(t, []string{imageLcowK8sPause, imageLcowAlpine}) + + client := newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sbRequest := getRunPodSandboxRequest(t, lcowRuntimeHandler) + sbRequest.Config.Linux = &runtime.LinuxPodSandboxConfig{ + SecurityContext: &runtime.LinuxSandboxSecurityContext{ + Privileged: true, + }, + } + + podID := runPodSandbox(t, client, ctx, sbRequest) + defer removePodSandbox(t, client, ctx, podID) + defer stopPodSandbox(t, client, ctx, podID) + + // Create a temporary ext4 VHD to mount into the container. 
+ vhdHostDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("failed to create temp directory: %s", err) + } + defer os.RemoveAll(vhdHostDir) + vhdHostPath := filepath.Join(vhdHostDir, "temp.vhdx") + createExt4VHD(ctx, t, vhdHostPath) + + vhdContainerPath := "/containerDir" + cRequest := &runtime.CreateContainerRequest{ + Config: &runtime.ContainerConfig{ + Metadata: &runtime.ContainerMetadata{}, + Image: &runtime.ImageSpec{ + Image: imageLcowAlpine, + }, + // Hold this command open until killed + Command: []string{ + "top", + }, + Linux: &runtime.LinuxContainerConfig{ + SecurityContext: &runtime.LinuxContainerSecurityContext{ + Privileged: true, + }, + }, + Mounts: []*runtime.Mount{ + { + HostPath: "vhd://" + vhdHostPath, + ContainerPath: vhdContainerPath, + // set 'rshared' propagation + Propagation: runtime.MountPropagation_PROPAGATION_BIDIRECTIONAL, + }, + }, + }, + PodSandboxId: podID, + SandboxConfig: sbRequest.Config, + } + + containerName := t.Name() + "-Container-0" + cRequest.Config.Metadata.Name = containerName + containerId0 := createContainer(t, client, ctx, cRequest) + defer removeContainer(t, client, ctx, containerId0) + startContainer(t, client, ctx, containerId0) + defer stopContainer(t, client, ctx, containerId0) + + containerName1 := t.Name() + "-Container-1" + cRequest.Config.Metadata.Name = containerName1 + containerId1 := createContainer(t, client, ctx, cRequest) + defer removeContainer(t, client, ctx, containerId1) + startContainer(t, client, ctx, containerId1) + defer stopContainer(t, client, ctx, containerId1) + + // create a test directory that will be the new mountpoint's source + createTestDirCmd := []string{ + "mkdir", + "/tmp/testdir", + } + _, errorMsg, exitCode := execContainer(t, client, ctx, containerId0, createTestDirCmd) + if exitCode != 0 { + t.Fatalf("Exec into container failed with: %v and exit code: %d, %s", errorMsg, exitCode, containerId0) + } + + // create a file in the test directory + createTestDirContentCmd := []string{ + "touch", + "/tmp/testdir/test.txt", + } + _, errorMsg, exitCode = execContainer(t, client, ctx, containerId0, createTestDirContentCmd) + if exitCode != 0 { + t.Fatalf("Exec into container failed with: %v and exit code: %d, %s", errorMsg, exitCode, containerId0) + } + + // create a test directory in the vhd that will be the new mountpoint's destination + createTestDirVhdCmd := []string{ + "mkdir", + fmt.Sprintf("%s/testdir", vhdContainerPath), + } + _, errorMsg, exitCode = execContainer(t, client, ctx, containerId0, createTestDirVhdCmd) + if exitCode != 0 { + t.Fatalf("Exec into container failed with: %v and exit code: %d, %s", errorMsg, exitCode, containerId0) + } + + // perform rshared mount of test directory into the vhd + mountTestDirToVhdCmd := []string{ + "mount", + "-o", + "rshared", + "/tmp/testdir", + fmt.Sprintf("%s/testdir", vhdContainerPath), + } + _, errorMsg, exitCode = execContainer(t, client, ctx, containerId0, mountTestDirToVhdCmd) + if exitCode != 0 { + t.Fatalf("Exec into container failed with: %v and exit code: %d, %s", errorMsg, exitCode, containerId0) + } + + // try to list the test file in the second container to verify it was propagated correctly + verifyTestMountCommand := []string{ + "ls", + fmt.Sprintf("%s/testdir/test.txt", vhdContainerPath), + } + _, errorMsg, exitCode = execContainer(t, client, ctx, containerId1, verifyTestMountCommand) + if exitCode != 0 { + t.Fatalf("Exec into container failed with: %v and exit code: %d, %s", errorMsg, exitCode, containerId1) + } +} + func 
Test_RunPodSandbox_MultipleContainersSameVhd_WCOW(t *testing.T) { requireFeatures(t, featureWCOWHypervisor) // Prior to 19H1, we aren't able to easily create a formatted VHD, as diff --git a/test/functional/lcow_test.go b/test/functional/lcow_test.go index 1684cdce40..b86392d7c3 100644 --- a/test/functional/lcow_test.go +++ b/test/functional/lcow_test.go @@ -143,7 +143,9 @@ func TestLCOWSimplePodScenario(t *testing.T) { if err := lcow.CreateScratch(context.Background(), lcowUVM, uvmScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { t.Fatal(err) } - if _, err := lcowUVM.AddSCSI(context.Background(), uvmScratchFile, `/tmp/scratch`, false, uvm.VMAccessTypeIndividual); err != nil { + + var options []string + if _, err := lcowUVM.AddSCSI(context.Background(), uvmScratchFile, `/tmp/scratch`, false, options, uvm.VMAccessTypeIndividual); err != nil { t.Fatal(err) } diff --git a/test/functional/uvm_scratch_test.go b/test/functional/uvm_scratch_test.go index e2e81950b7..fe5041ad25 100644 --- a/test/functional/uvm_scratch_test.go +++ b/test/functional/uvm_scratch_test.go @@ -45,7 +45,8 @@ func TestScratchCreateLCOW(t *testing.T) { } // Make sure it can be added (verifies it has access correctly) - scsiMount, err := targetUVM.AddSCSI(context.Background(), destTwo, "", false, uvm.VMAccessTypeIndividual) + var options []string + scsiMount, err := targetUVM.AddSCSI(context.Background(), destTwo, "", false, options, uvm.VMAccessTypeIndividual) if err != nil { t.Fatal(err) } diff --git a/test/functional/uvm_scsi_test.go b/test/functional/uvm_scsi_test.go index 0ba925ea44..4a9962d422 100644 --- a/test/functional/uvm_scsi_test.go +++ b/test/functional/uvm_scsi_test.go @@ -51,7 +51,8 @@ func testAddSCSI(u *uvm.UtilityVM, disks []string, pathPrefix string, usePath bo if usePath { uvmPath = fmt.Sprintf(`%s%d`, pathPrefix, i) } - scsiMount, err := u.AddSCSI(context.Background(), disks[i], uvmPath, false, uvm.VMAccessTypeIndividual) + var options []string + scsiMount, err := u.AddSCSI(context.Background(), disks[i], uvmPath, false, options, uvm.VMAccessTypeIndividual) if err != nil { return err } @@ -274,7 +275,9 @@ func TestParallelScsiOps(t *testing.T) { t.Errorf("failed to grantvmaccess for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) continue } - _, err = u.AddSCSI(context.Background(), path, "", false, uvm.VMAccessTypeIndividual) + + var options []string + _, err = u.AddSCSI(context.Background(), path, "", false, options, uvm.VMAccessTypeIndividual) if err != nil { os.Remove(path) t.Errorf("failed to AddSCSI for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) @@ -286,7 +289,8 @@ func TestParallelScsiOps(t *testing.T) { // This worker cant continue because the index is dead. 
We have to stop break } - _, err = u.AddSCSI(context.Background(), path, fmt.Sprintf("/run/gcs/c/0/scsi/%d", iteration), false, uvm.VMAccessTypeIndividual) + + _, err = u.AddSCSI(context.Background(), path, fmt.Sprintf("/run/gcs/c/0/scsi/%d", iteration), false, options, uvm.VMAccessTypeIndividual) if err != nil { os.Remove(path) t.Errorf("failed to AddSCSI for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go index afb7b5ad06..67f56e28f8 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go @@ -25,10 +25,11 @@ type CombinedLayers struct { // SCSI. Scratch space for remote file-system commands, or R/W layer for containers type LCOWMappedVirtualDisk struct { - MountPath string `json:"MountPath,omitempty"` - Lun uint8 `json:"Lun,omitempty"` - Controller uint8 `json:"Controller,omitempty"` - ReadOnly bool `json:"ReadOnly,omitempty"` + MountPath string `json:"MountPath,omitempty"` + Lun uint8 `json:"Lun,omitempty"` + Controller uint8 `json:"Controller,omitempty"` + ReadOnly bool `json:"ReadOnly,omitempty"` + Options []string `json:"Options,omitempty"` } type WCOWMappedVirtualDisk struct { diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go index 66752d18a1..2e634c6e2a 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go @@ -89,11 +89,12 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * break } } + l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) if mount.Type == "physical-disk" { l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount") uvmPathForShare = fmt.Sprintf(uvm.LCOWGlobalMountPrefix, coi.HostingSystem.UVMMountCounter()) - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly) + scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly, mount.Options) if err != nil { return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) } @@ -107,7 +108,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * // if the scsi device is already attached then we take the uvm path that the function below returns // that is where it was previously mounted in UVM - scsiMount, err := coi.HostingSystem.AddSCSI(ctx, hostPath, uvmPathForShare, readOnly, uvm.VMAccessTypeIndividual) + scsiMount, err := coi.HostingSystem.AddSCSI(ctx, hostPath, uvmPathForShare, readOnly, mount.Options, uvm.VMAccessTypeIndividual) if err != nil { return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) } @@ -136,6 +137,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * uvmPathForFile = path.Join(uvmPathForShare, fileName) } l.Debug("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount") + share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForShare, readOnly, restrictAccess, allowedNames) if err != nil { return errors.Wrapf(err, "adding plan9 mount %+v", mount) @@ -172,7 +174,8 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * // 
use lcowNvidiaMountPath since we only support nvidia gpus right now // must use scsi here since DDA'ing a hyper-v pci device is not supported on VMs that have ANY virtual memory // gpuvhd must be granted VM Group access. - scsiMount, err := coi.HostingSystem.AddSCSI(ctx, gpuSupportVhdPath, uvm.LCOWNvidiaMountPath, true, uvm.VMAccessTypeNoop) + options := []string{"ro"} + scsiMount, err := coi.HostingSystem.AddSCSI(ctx, gpuSupportVhdPath, uvm.LCOWNvidiaMountPath, true, options, uvm.VMAccessTypeNoop) if err != nil { return errors.Wrapf(err, "failed to add scsi device %s in the UVM %s at %s", gpuSupportVhdPath, coi.HostingSystem.ID(), uvm.LCOWNvidiaMountPath) } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go index f2984a423b..79443290a5 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go @@ -135,7 +135,7 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) if mount.Type == "physical-disk" { l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly) + scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly, mount.Options) if err != nil { return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) } @@ -143,7 +143,7 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R r.Add(scsiMount) } else if mount.Type == "virtual-disk" { l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSI(ctx, mount.Source, uvmPath, readOnly, uvm.VMAccessTypeIndividual) + scsiMount, err := coi.HostingSystem.AddSCSI(ctx, mount.Source, uvmPath, readOnly, mount.Options, uvm.VMAccessTypeIndividual) if err != nil { return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go index f3f4e77edd..123b87ebb1 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go @@ -161,7 +161,8 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot } log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD") - scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, uvmpkg.VMAccessTypeIndividual) + var options []string + scsiMount, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false, options, uvmpkg.VMAccessTypeIndividual) if err != nil { return "", fmt.Errorf("failed to add SCSI scratch VHD: %s", err) } @@ -223,8 +224,9 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) } } + options := []string{"ro"} uvmPath = fmt.Sprintf(uvmpkg.LCOWGlobalMountPrefix, uvm.UVMMountCounter()) - sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, uvmpkg.VMAccessTypeNoop) + sm, err := uvm.AddSCSI(ctx, layerPath, uvmPath, true, options, uvmpkg.VMAccessTypeNoop) if err != nil { return "", fmt.Errorf("failed to add SCSI layer: %s", err) } diff --git 
a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go index a208cb7231..c7af7cf6ce 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/disk.go @@ -28,7 +28,8 @@ func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) er "dest": destPath, }).Debug("lcow::FormatDisk opts") - scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false) // No destination as not formatted + var options []string + scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false, options) // No destination as not formatted if err != nil { return err } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go index 8d1e337b52..12884b4745 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go @@ -66,7 +66,8 @@ func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string, return fmt.Errorf("failed to create VHDx %s: %s", destFile, err) } - scsi, err := lcowUVM.AddSCSI(ctx, destFile, "", false, uvm.VMAccessTypeIndividual) // No destination as not formatted + var options []string + scsi, err := lcowUVM.AddSCSI(ctx, destFile, "", false, options, uvm.VMAccessTypeIndividual) // No destination as not formatted if err != nil { return err } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go index a183204cb1..1e7985dba1 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go @@ -222,8 +222,8 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { // `readOnly` set to `true` if the vhd/vhdx should be attached read only. // // `vmAccess` indicates what access to grant the vm for the hostpath -func (uvm *UtilityVM) AddSCSI(ctx context.Context, hostPath string, uvmPath string, readOnly bool, vmAccess VMAccessType) (*SCSIMount, error) { - return uvm.addSCSIActual(ctx, hostPath, uvmPath, "VirtualDisk", readOnly, vmAccess) +func (uvm *UtilityVM) AddSCSI(ctx context.Context, hostPath string, uvmPath string, readOnly bool, options []string, vmAccess VMAccessType) (*SCSIMount, error) { + return uvm.addSCSIActual(ctx, hostPath, uvmPath, "VirtualDisk", readOnly, options, vmAccess) } // AddSCSIPhysicalDisk attaches a physical disk from the host directly to the @@ -234,8 +234,8 @@ func (uvm *UtilityVM) AddSCSI(ctx context.Context, hostPath string, uvmPath stri // `uvmPath` is optional if a guest mount is not requested. // // `readOnly` set to `true` if the physical disk should be attached read only. 
-func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool) (*SCSIMount, error) { - return uvm.addSCSIActual(ctx, hostPath, uvmPath, "PassThru", readOnly, VMAccessTypeIndividual) +func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool, options []string) (*SCSIMount, error) { + return uvm.addSCSIActual(ctx, hostPath, uvmPath, "PassThru", readOnly, options, VMAccessTypeIndividual) } // addSCSIActual is the implementation behind the external functions AddSCSI and @@ -252,7 +252,7 @@ func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath // `vmAccess` indicates what access to grant the vm for the hostpath // // Returns result from calling modify with the given scsi mount -func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, attachmentType string, readOnly bool, vmAccess VMAccessType) (sm *SCSIMount, err error) { +func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, attachmentType string, readOnly bool, options []string, vmAccess VMAccessType) (sm *SCSIMount, err error) { sm, existed, err := uvm.allocateSCSIMount(ctx, readOnly, hostPath, uvmPath, attachmentType, vmAccess) if err != nil { return nil, err @@ -304,6 +304,7 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, atta Lun: uint8(sm.LUN), Controller: uint8(sm.Controller), ReadOnly: readOnly, + Options: options, } } SCSIModification.GuestRequest = guestReq
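For reference, a minimal sketch of how callers use the updated signatures after this change. The extra slice carries guest mount options ("ro", "rshared", and so on) that the guest service applies when mounting the attachment; callers with no options pass a nil slice, matching the old behaviour. The helper names below are illustrative only, and internal/uvm is an internal package, so this compiles only from within the hcsshim module.

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/uvm"
)

// attachLayerReadOnly mirrors the layers.go call site: a read-only layer VHD
// is attached with the "ro" guest option so the guest mounts it read-only.
func attachLayerReadOnly(ctx context.Context, vm *uvm.UtilityVM, layerVHD, uvmPath string) (*uvm.SCSIMount, error) {
	options := []string{"ro"}
	return vm.AddSCSI(ctx, layerVHD, uvmPath, true, options, uvm.VMAccessTypeNoop)
}

// attachScratch mirrors the scratch/format call sites: no guest options are
// needed, so a nil slice is forwarded.
func attachScratch(ctx context.Context, vm *uvm.UtilityVM, scratchVHD, uvmPath string) (*uvm.SCSIMount, error) {
	var options []string
	return vm.AddSCSI(ctx, scratchVHD, uvmPath, false, options, uvm.VMAccessTypeIndividual)
}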
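A sketch of the guest request the new field produces. The struct below mirrors guestrequest.LCOWMappedVirtualDisk as modified by this patch so the example stays self-contained; the mount path and option values are made up for illustration. Marshalling shows how the options reach the guest service on the wire.

package main

import (
	"encoding/json"
	"fmt"
)

// LCOWMappedVirtualDisk mirrors internal/guestrequest/types.go after this change.
type LCOWMappedVirtualDisk struct {
	MountPath  string   `json:"MountPath,omitempty"`
	Lun        uint8    `json:"Lun,omitempty"`
	Controller uint8    `json:"Controller,omitempty"`
	ReadOnly   bool     `json:"ReadOnly,omitempty"`
	Options    []string `json:"Options,omitempty"`
}

func main() {
	req := LCOWMappedVirtualDisk{
		MountPath: "/run/mounts/m0", // example path only
		Lun:       1,
		Options:   []string{"rw", "rshared"}, // forwarded from the OCI mount's options
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// Output: {"MountPath":"/run/mounts/m0","Lun":1,"Options":["rw","rshared"]}
}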
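Finally, a hypothetical illustration of what the guest side could do with the Options slice; the real handling lives in the LCOW guest (opengcs) and is not part of this patch. It shows why an "rshared" option matters for the new bidirectional-propagation test: the mount point must be made shared so bind mounts created beneath it propagate between containers sharing the disk.

package main

import (
	"golang.org/x/sys/unix"
)

// applyMountOptions is an illustrative sketch (not the actual guest code):
// translate well-known option strings into mount flags, mount the device,
// then apply propagation flags with a second mount call if requested.
func applyMountOptions(source, target, fstype string, options []string) error {
	var flags uintptr
	data := ""
	shared := false
	for _, o := range options {
		switch o {
		case "ro":
			flags |= unix.MS_RDONLY
		case "rw":
			// default; nothing to set
		case "rshared":
			shared = true
		default:
			if data != "" {
				data += ","
			}
			data += o
		}
	}
	if err := unix.Mount(source, target, fstype, flags, data); err != nil {
		return err
	}
	if shared {
		// Make the mount point (recursively) shared so mounts created under it
		// propagate to peers -- the behaviour the new LCOW test verifies.
		return unix.Mount("", target, "", unix.MS_SHARED|unix.MS_REC, "")
	}
	return nil
}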