From 38e70aab343e231ee48589a5cf7d8c3fe56bebb9 Mon Sep 17 00:00:00 2001 From: Daniel Canter Date: Wed, 26 May 2021 06:15:46 -0700 Subject: [PATCH] Rework UVM package to use vm package abstractions Rework the UVM package to make use of the abstractions around virtstack interactions. Signed-off-by: Daniel Canter --- internal/gcs/guestconnection.go | 13 -- internal/oci/uvm.go | 18 ++- internal/uvm/clone.go | 15 +- internal/uvm/combine_layers.go | 48 +++--- internal/uvm/computeagent.go | 10 +- internal/uvm/cpugroups.go | 18 +-- internal/uvm/cpulimits_update.go | 15 +- internal/uvm/create.go | 115 ++++++++------- internal/uvm/create_lcow.go | 227 ++++++++++++++++------------ internal/uvm/create_wcow.go | 238 +++++++++++++++--------------- internal/uvm/guest_request.go | 10 +- internal/uvm/hvsocket.go | 36 ++--- internal/uvm/memory_update.go | 23 +-- internal/uvm/modify.go | 47 ------ internal/uvm/network.go | 120 +++++++-------- internal/uvm/pipes.go | 25 ++-- internal/uvm/plan9.go | 82 +++++----- internal/uvm/scsi.go | 103 +++++++------ internal/uvm/start.go | 56 +++---- internal/uvm/stats.go | 145 +----------------- internal/uvm/types.go | 17 +-- internal/uvm/update_uvm.go | 4 +- internal/uvm/virtual_device.go | 45 +++--- internal/uvm/vpmem.go | 54 +++---- internal/uvm/vsmb.go | 75 +++++----- internal/uvm/wait.go | 3 +- internal/vm/builder.go | 18 ++- internal/vm/hcs/builder.go | 34 ++++- internal/vm/hcs/container.go | 27 ++++ internal/vm/hcs/hcs.go | 13 ++ internal/vm/hcs/memory.go | 14 +- internal/vm/hcs/network.go | 19 +++ internal/vm/hcs/opts.go | 54 +++++++ internal/vm/hcs/pipe.go | 26 ++++ internal/vm/hcs/processor.go | 33 +++++ internal/vm/hcs/scsi.go | 15 +- internal/vm/hcs/storage.go | 8 +- internal/vm/hcs/vmsocket.go | 21 +++ internal/vm/hcs/vsmb.go | 24 +-- internal/vm/hcs/windows.go | 11 ++ internal/vm/remotevm/builder.go | 80 +++++++--- internal/vm/remotevm/memory.go | 6 +- internal/vm/remotevm/network.go | 6 +- internal/vm/remotevm/opts.go | 19 +++ 
internal/vm/remotevm/processor.go | 7 +- internal/vm/remotevm/remotevm.go | 43 ++++-- internal/vm/remotevm/vmsocket.go | 33 +++-- internal/vm/vm.go | 60 +++++++- 48 files changed, 1175 insertions(+), 958 deletions(-) create mode 100644 internal/vm/hcs/container.go create mode 100644 internal/vm/hcs/opts.go create mode 100644 internal/vm/hcs/pipe.go create mode 100644 internal/vm/remotevm/opts.go diff --git a/internal/gcs/guestconnection.go b/internal/gcs/guestconnection.go index f01b97939b..478516b9c5 100644 --- a/internal/gcs/guestconnection.go +++ b/internal/gcs/guestconnection.go @@ -12,8 +12,6 @@ import ( "strings" "sync" - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/cow" "github.com/Microsoft/hcsshim/internal/hcs/schema1" "github.com/Microsoft/hcsshim/internal/log" @@ -34,17 +32,6 @@ const ( // the vsock port `port`. type IoListenFunc func(port uint32) (net.Listener, error) -// HvsockIoListen returns an implementation of IoListenFunc that listens -// on the specified vsock port for the VM specified by `vmID`. -func HvsockIoListen(vmID guid.GUID) IoListenFunc { - return func(port uint32) (net.Listener, error) { - return winio.ListenHvsock(&winio.HvsockAddr{ - VMID: vmID, - ServiceID: winio.VsockServiceID(port), - }) - } -} - // GuestConnectionConfig contains options for creating a guest connection. type GuestConnectionConfig struct { // Conn specifies the connection to use for the bridge. It will be closed diff --git a/internal/oci/uvm.go b/internal/oci/uvm.go index db3f429253..804020aefc 100644 --- a/internal/oci/uvm.go +++ b/internal/oci/uvm.go @@ -156,9 +156,13 @@ const ( // the TemplateID. It is the client's responsibility to make sure that the sandbox // within which a cloned container needs to be created must also be created from the // same template. 
- annotationTemplateID = "io.microsoft.virtualmachine.templateid" - annotationNetworkConfigProxy = "io.microsoft.network.ncproxy" - AnnotationNcproxyContainerID = "io.microsoft.network.ncproxy.containerid" + annotationTemplateID = "io.microsoft.virtualmachine.templateid" + annotationNetworkConfigProxy = "io.microsoft.network.ncproxy" + AnnotationNcproxyContainerID = "io.microsoft.network.ncproxy.containerid" + annotationsVMSource = "io.microsoft.virtualmachine.vmsource" + annotationVMServiceAddress = "io.microsoft.virtualmachine.vmservice.address" + annotationVMServiceBinPath = "io.microsoft.virtualmachine.vmservice.path" + annotationIgnoreSupportedCheck = "io.microsoft.virtualmachine.vmservice.ignoresupported" ) // parseAnnotationsBool searches `a` for `key` and if found verifies that the @@ -462,6 +466,10 @@ func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) ( lopts.BootFilesPath = parseAnnotationsString(s.Annotations, annotationBootFilesRootPath, lopts.BootFilesPath) lopts.CPUGroupID = parseAnnotationsString(s.Annotations, annotationCPUGroupID, lopts.CPUGroupID) lopts.NetworkConfigProxy = parseAnnotationsString(s.Annotations, annotationNetworkConfigProxy, lopts.NetworkConfigProxy) + lopts.VMSource = parseAnnotationsString(s.Annotations, annotationsVMSource, lopts.VMSource) + lopts.VMServiceAddress = parseAnnotationsString(s.Annotations, annotationVMServiceAddress, lopts.VMServiceAddress) + lopts.VMServicePath = parseAnnotationsString(s.Annotations, annotationVMServiceBinPath, lopts.VMServicePath) + lopts.IgnoreSupportedCheck = parseAnnotationsBool(ctx, s.Annotations, annotationIgnoreSupportedCheck, lopts.IgnoreSupportedCheck) handleAnnotationPreferredRootFSType(ctx, s.Annotations, lopts) handleAnnotationKernelDirectBoot(ctx, s.Annotations, lopts) @@ -486,6 +494,10 @@ func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) ( wopts.CPUGroupID = parseAnnotationsString(s.Annotations, annotationCPUGroupID, 
wopts.CPUGroupID) wopts.NetworkConfigProxy = parseAnnotationsString(s.Annotations, annotationNetworkConfigProxy, wopts.NetworkConfigProxy) wopts.NoDirectMap = parseAnnotationsBool(ctx, s.Annotations, annotationVSMBNoDirectMap, wopts.NoDirectMap) + wopts.VMSource = parseAnnotationsString(s.Annotations, annotationsVMSource, wopts.VMSource) + wopts.VMServiceAddress = parseAnnotationsString(s.Annotations, annotationVMServiceAddress, wopts.VMServiceAddress) + wopts.VMServicePath = parseAnnotationsString(s.Annotations, annotationVMServiceBinPath, wopts.VMServicePath) + wopts.IgnoreSupportedCheck = parseAnnotationsBool(ctx, s.Annotations, annotationIgnoreSupportedCheck, wopts.IgnoreSupportedCheck) handleAnnotationFullyPhysicallyBacked(ctx, s.Annotations, wopts) if err := handleCloneAnnotations(ctx, s.Annotations, wopts); err != nil { return nil, err diff --git a/internal/uvm/clone.go b/internal/uvm/clone.go index 9cc574612a..3595b8ec3c 100644 --- a/internal/uvm/clone.go +++ b/internal/uvm/clone.go @@ -5,12 +5,11 @@ import ( "fmt" "github.com/Microsoft/hcsshim/internal/cow" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/pkg/errors" ) const ( - hcsComputeSystemSaveType = "AsTemplate" // default namespace ID used for all template and clone VMs. DEFAULT_CLONE_NETWORK_NAMESPACE_ID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A" ) @@ -55,8 +54,8 @@ type Cloneable interface { // A struct to keep all the information that might be required during cloning process of // a resource. type cloneData struct { - // doc spec for the clone - doc *hcsschema.ComputeSystem + // Builder for the virtual machine document. 
+ builder vm.UVMBuilder // scratchFolder of the clone scratchFolder string // UVMID of the clone @@ -112,14 +111,10 @@ func (uvm *UtilityVM) GenerateTemplateConfig() (*UVMTemplateConfig, error) { // uvm must be in the paused state before it can be saved as a template.save call will throw // an incorrect uvm state exception if uvm is not in the paused state at the time of saving. func (uvm *UtilityVM) SaveAsTemplate(ctx context.Context) error { - if err := uvm.hcsSystem.Pause(ctx); err != nil { + if err := uvm.vm.Pause(ctx); err != nil { return errors.Wrap(err, "error pausing the VM") } - - saveOptions := hcsschema.SaveOptions{ - SaveType: hcsComputeSystemSaveType, - } - if err := uvm.hcsSystem.Save(ctx, saveOptions); err != nil { + if err := uvm.vm.Save(ctx); err != nil { return errors.Wrap(err, "error saving the VM") } return nil diff --git a/internal/uvm/combine_layers.go b/internal/uvm/combine_layers.go index 74c0ac70e2..3bbd11a803 100644 --- a/internal/uvm/combine_layers.go +++ b/internal/uvm/combine_layers.go @@ -16,17 +16,15 @@ func (uvm *UtilityVM) CombineLayersWCOW(ctx context.Context, layerPaths []hcssch if uvm.operatingSystem != "windows" { return errNotSupported } - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeCombinedLayers, - RequestType: requesttype.Add, - Settings: guestrequest.CombinedLayers{ - ContainerRootPath: containerRootPath, - Layers: layerPaths, - }, + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Add, + Settings: guestrequest.CombinedLayers{ + ContainerRootPath: containerRootPath, + Layers: layerPaths, }, } - return uvm.modify(ctx, msr) + return uvm.GuestRequest(ctx, guestReq) } // CombineLayersLCOW combines `layerPaths` and optionally `scratchPath` into an @@ -44,32 +42,28 @@ func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, layerPaths []string for _, l := range layerPaths { 
layers = append(layers, hcsschema.Layer{Path: l}) } - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeCombinedLayers, - RequestType: requesttype.Add, - Settings: guestrequest.CombinedLayers{ - ContainerRootPath: rootfsPath, - Layers: layers, - ScratchPath: scratchPath, - }, + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Add, + Settings: guestrequest.CombinedLayers{ + ContainerRootPath: rootfsPath, + Layers: layers, + ScratchPath: scratchPath, }, } - return uvm.modify(ctx, msr) + return uvm.GuestRequest(ctx, guestReq) } // RemoveCombinedLayers removes the previously combined layers at `rootfsPath`. // // NOTE: `rootfsPath` is the path from within the UVM. func (uvm *UtilityVM) RemoveCombinedLayers(ctx context.Context, rootfsPath string) error { - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeCombinedLayers, - RequestType: requesttype.Remove, - Settings: guestrequest.CombinedLayers{ - ContainerRootPath: rootfsPath, - }, + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Remove, + Settings: guestrequest.CombinedLayers{ + ContainerRootPath: rootfsPath, }, } - return uvm.modify(ctx, msr) + return uvm.GuestRequest(ctx, guestReq) } diff --git a/internal/uvm/computeagent.go b/internal/uvm/computeagent.go index 3ccacd878f..465a865e93 100644 --- a/internal/uvm/computeagent.go +++ b/internal/uvm/computeagent.go @@ -6,8 +6,8 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/computeagent" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/hns" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/pkg/octtrpc" "github.com/containerd/ttrpc" "github.com/pkg/errors" @@ -69,16 
+69,16 @@ func (ca *computeAgent) ModifyNIC(ctx context.Context, req *computeagent.ModifyN return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.EndpointName) } - moderationValue := hcsschema.InterruptModerationValue(req.IovPolicySettings.InterruptModeration) - moderationName := hcsschema.InterruptModerationValueToName[moderationValue] + moderationValue := vm.InterruptModerationValue(req.IovPolicySettings.InterruptModeration) + moderationName := vm.InterruptModerationValueToName[moderationValue] - iovSettings := &hcsschema.IovSettings{ + iovSettings := &vm.IovSettings{ OffloadWeight: &req.IovPolicySettings.IovOffloadWeight, QueuePairsRequested: &req.IovPolicySettings.QueuePairsRequested, InterruptModeration: &moderationName, } - nic := &hcsschema.NetworkAdapter{ + nic := &vm.NetworkAdapter{ EndpointId: endpoint.Id, MacAddress: endpoint.MacAddress, IovSettings: iovSettings, diff --git a/internal/uvm/cpugroups.go b/internal/uvm/cpugroups.go index 17d442c88e..74f8104d26 100644 --- a/internal/uvm/cpugroups.go +++ b/internal/uvm/cpugroups.go @@ -2,12 +2,11 @@ package uvm import ( "context" - "errors" "fmt" "github.com/Microsoft/hcsshim/internal/cpugroup" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" ) // Build that assigning a cpu group on creation of a vm is supported @@ -33,16 +32,11 @@ func (uvm *UtilityVM) SetCPUGroup(ctx context.Context, id string) error { // setCPUGroup sets the VM's cpugroup func (uvm *UtilityVM) setCPUGroup(ctx context.Context, id string) error { - req := &hcsschema.ModifySettingRequest{ - ResourcePath: resourcepaths.CPUGroupResourcePath, - Settings: &hcsschema.CpuGroup{ - Id: id, - }, + windows, ok := uvm.vm.(vm.WindowsConfigManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping cpu group operation") } - if err := uvm.modify(ctx, req); err != nil { - return err 
- } - return nil + return windows.SetCPUGroup(ctx, id) } // unsetCPUGroup sets the VM's cpugroup to the null group ID diff --git a/internal/uvm/cpulimits_update.go b/internal/uvm/cpulimits_update.go index 264da31a28..b6fb5358ae 100644 --- a/internal/uvm/cpulimits_update.go +++ b/internal/uvm/cpulimits_update.go @@ -3,16 +3,15 @@ package uvm import ( "context" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" ) // UpdateCPULimits updates the CPU limits of the utility vm -func (uvm *UtilityVM) UpdateCPULimits(ctx context.Context, limits *hcsschema.ProcessorLimits) error { - req := &hcsschema.ModifySettingRequest{ - ResourcePath: resourcepaths.CPULimitsResourcePath, - Settings: limits, +func (uvm *UtilityVM) UpdateCPULimits(ctx context.Context, limits *vm.ProcessorLimits) error { + cpu, ok := uvm.vm.(vm.ProcessorManager) + if !ok || !uvm.vm.Supported(vm.Processor, vm.Update) { + return errors.Wrap(vm.ErrNotSupported, "stopping update of cpus") } - - return uvm.modify(ctx, req) + return cpu.SetProcessorLimits(ctx, limits) } diff --git a/internal/uvm/create.go b/internal/uvm/create.go index 6f7f844462..7d54c5f60a 100644 --- a/internal/uvm/create.go +++ b/internal/uvm/create.go @@ -2,23 +2,23 @@ package uvm import ( "context" - "errors" "fmt" "os" "path/filepath" "runtime" "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/Microsoft/hcsshim/internal/vm/hcs" + "github.com/Microsoft/hcsshim/internal/vm/remotevm" "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" 
"github.com/sirupsen/logrus" "go.opencensus.io/trace" - "golang.org/x/sys/windows" ) // Options are the set of options passed to Create() to create a utility vm. @@ -80,11 +80,56 @@ type Options struct { // CPUGroupID set the ID of a CPUGroup on the host that the UVM should be added to on start. // Defaults to an empty string which indicates the UVM should not be added to any CPUGroup. CPUGroupID string + // NetworkConfigProxy holds the address of the network config proxy service. // This != "" determines whether to start the ComputeAgent TTRPC service // that receives the UVMs set of NICs from this proxy instead of enumerating // the endpoints locally. NetworkConfigProxy string + + // VMSource is a string signifying what virtstack to use to launch a utility VM. + // 1. hcs - The default if no option set explicitly. + // 2. remotevm - Talk to another virtstack that implements the vmservice ttrpc interface to launch + // VMs. + VMSource string + + // VMServiceAddress specifies the address to connect to talk to a process implementing the vmservice + // ttrpc interface. + VMServiceAddress string + + // VMServicePath specifies the path on disk of the binary to launch that implements the vmservice ttrpc + // interface. If this is omitted and VMServiceAddress is present, it's inferred that the binary is already + // up and running and another instance isn't needed. If there needs to be a new instance per virtual machine + // launched by the service, this will have to be provided. + VMServicePath string + + // IgnoreSupportedCheck ignores any capability checks for virtstack functionality. 
+ IgnoreSupportedCheck bool +} + +func applyHcsOpts(opts interface{}) []vm.CreateOpt { + var hcsOpts []vm.CreateOpt + switch opts := opts.(type) { + case *OptionsLCOW: + case *OptionsWCOW: + if !opts.DisableCompartmentNamespace { + hcsOpts = append(hcsOpts, hcs.WithEnableCompartmentNamespace()) + } + if opts.IsClone { + if opts.TemplateConfig != nil { + hcsOpts = append(hcsOpts, hcs.WithCloneConfig(opts.TemplateConfig.UVMID)) + } + } + } + return hcsOpts +} + +func applyRemoteVMOpts(opts *Options) []vm.CreateOpt { + var remoteVMOpts []vm.CreateOpt + if opts.IgnoreSupportedCheck { + remoteVMOpts = append(remoteVMOpts, remotevm.WithIgnoreSupported()) + } + return remoteVMOpts } // compares the create opts used during template creation with the create opts @@ -178,6 +223,7 @@ func newDefaultOptions(id, owner string) *Options { EnableDeferredCommit: false, ProcessorCount: defaultProcessorCount(), FullyPhysicallyBacked: false, + VMSource: vm.HCS, } if opts.Owner == "" { @@ -189,7 +235,7 @@ func newDefaultOptions(id, owner string) *Options { // ID returns the ID of the VM's compute system. func (uvm *UtilityVM) ID() string { - return uvm.hcsSystem.ID() + return uvm.vm.ID() } // OS returns the operating system of the utility VM. @@ -197,33 +243,8 @@ func (uvm *UtilityVM) OS() string { return uvm.operatingSystem } -func (uvm *UtilityVM) create(ctx context.Context, doc interface{}) error { - uvm.exitCh = make(chan struct{}) - system, err := hcs.CreateComputeSystem(ctx, uvm.id, doc) - if err != nil { - return err - } - defer func() { - if system != nil { - _ = system.Terminate(ctx) - _ = system.Wait() - } - }() - - // Cache the VM ID of the utility VM. 
- properties, err := system.Properties(ctx) - if err != nil { - return err - } - uvm.runtimeID = properties.RuntimeID - uvm.hcsSystem = system - system = nil - - log.G(ctx).WithFields(logrus.Fields{ - logfields.UVMID: uvm.id, - "runtime-id": uvm.runtimeID.String(), - }).Debug("created utility VM") - return nil +func (uvm *UtilityVM) VM() vm.UVM { + return uvm.vm } // Close terminates and releases resources associated with the utility VM. @@ -233,10 +254,8 @@ func (uvm *UtilityVM) Close() (err error) { defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute(logfields.UVMID, uvm.id)) - windows.Close(uvm.vmmemProcess) - - if uvm.hcsSystem != nil { - _ = uvm.hcsSystem.Terminate(ctx) + if uvm.vm != nil { + _ = uvm.vm.Stop(ctx) _ = uvm.Wait() } @@ -252,8 +271,9 @@ func (uvm *UtilityVM) Close() (err error) { uvm.outputListener.Close() uvm.outputListener = nil } - if uvm.hcsSystem != nil { - return uvm.hcsSystem.Close() + + if uvm.vm != nil { + return uvm.vm.Close() } return nil } @@ -267,18 +287,7 @@ func (uvm *UtilityVM) CreateContainer(ctx context.Context, id string, settings i } return c, nil } - doc := hcsschema.ComputeSystem{ - HostingSystemId: uvm.id, - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - HostedSystem: settings, - } - c, err := hcs.CreateComputeSystem(ctx, id, &doc) - if err != nil { - return nil, err - } - return c, err + return nil, errors.New("no guest connection available to create container") } // CreateProcess creates a process in the utility VM. 
@@ -286,7 +295,7 @@ func (uvm *UtilityVM) CreateProcess(ctx context.Context, settings interface{}) ( if uvm.gc != nil { return uvm.gc.CreateProcess(ctx, settings) } - return uvm.hcsSystem.CreateProcess(ctx, settings) + return nil, errors.New("no guest connection available to create process") } // IsOCI returns false, indicating the parameters to CreateProcess should not @@ -297,12 +306,12 @@ func (uvm *UtilityVM) IsOCI() bool { // Terminate requests that the utility VM be terminated. func (uvm *UtilityVM) Terminate(ctx context.Context) error { - return uvm.hcsSystem.Terminate(ctx) + return uvm.vm.Stop(ctx) } // ExitError returns an error if the utility VM has terminated unexpectedly. func (uvm *UtilityVM) ExitError() error { - return uvm.hcsSystem.ExitError() + return uvm.vm.ExitError() } func defaultProcessorCount() int32 { diff --git a/internal/uvm/create_lcow.go b/internal/uvm/create_lcow.go index 92c7e8d478..f6de528120 100644 --- a/internal/uvm/create_lcow.go +++ b/internal/uvm/create_lcow.go @@ -10,17 +10,18 @@ import ( "strings" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/Microsoft/hcsshim/internal/vm/hcs" + "github.com/Microsoft/hcsshim/internal/vm/remotevm" "github.com/containerd/ttrpc" "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/gcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" - "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -137,7 +138,7 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { return opts } -// CreateLCOW creates an HCS compute system representing a utility VM. 
+// CreateLCOW creates a Linux Utility VM. func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error) { ctx, span := trace.StartSpan(ctx, "uvm::CreateLCOW") defer span.End() @@ -182,6 +183,7 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) { return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath) } + rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile) if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) { return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath) @@ -196,6 +198,28 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error return nil, fmt.Errorf("failed to get host processor information: %s", err) } + var ( + uvmb vm.UVMBuilder + cOpts []vm.CreateOpt + ) + switch opts.VMSource { + case vm.HCS: + uvmb, err = hcs.NewUVMBuilder(uvm.id, uvm.owner, vm.Linux) + if err != nil { + return nil, errors.Wrap(err, "failed to create UVM builder") + } + cOpts = applyHcsOpts(opts) + case vm.RemoteVM: + uvmb, err = remotevm.NewUVMBuilder(ctx, uvm.id, uvm.owner, opts.VMServicePath, opts.VMServiceAddress, vm.Linux) + if err != nil { + return nil, errors.Wrap(err, "failed to create UVM builder") + } + cOpts = applyRemoteVMOpts(opts.Options) + default: + return nil, fmt.Errorf("unknown VM source: %s", opts.VMSource) + } + uvm.builder = uvmb + // To maintain compatability with Docker we need to automatically downgrade // a user CPU count if the setting is not possible. uvm.processorCount = uvm.normalizeProcessorCount(ctx, opts.ProcessorCount, processorTopology) @@ -203,71 +227,82 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error // Align the requested memory size. 
memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB) - processor := &hcsschema.Processor2{ - Count: uvm.processorCount, - Limit: opts.ProcessorLimit, - Weight: opts.ProcessorWeight, - } // We can set a cpu group for the VM at creation time in recent builds. if opts.CPUGroupID != "" { if osversion.Build() < cpuGroupCreateBuild { return nil, errCPUGroupCreateNotSupported } - processor.CpuGroup = &hcsschema.CpuGroup{Id: opts.CPUGroupID} - } - - doc := &hcsschema.ComputeSystem{ - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - VirtualMachine: &hcsschema.VirtualMachine{ - StopOnReset: true, - Chipset: &hcsschema.Chipset{}, - ComputeTopology: &hcsschema.Topology{ - Memory: &hcsschema.Memory2{ - SizeInMB: memorySizeInMB, - AllowOvercommit: opts.AllowOvercommit, - EnableDeferredCommit: opts.EnableDeferredCommit, - EnableColdDiscardHint: opts.EnableColdDiscardHint, - LowMMIOGapInMB: opts.LowMMIOGapInMB, - HighMMIOBaseInMB: opts.HighMMIOBaseInMB, - HighMMIOGapInMB: opts.HighMMIOGapInMB, - }, - Processor: processor, - }, - Devices: &hcsschema.Devices{ - HvSocket: &hcsschema.HvSocket2{ - HvSocketConfig: &hcsschema.HvSocketSystemConfig{ - // Allow administrators and SYSTEM to bind to vsock sockets - // so that we can create a GCS log socket. 
- DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - }, - }, - Plan9: &hcsschema.Plan9{}, - }, - }, + windows, ok := uvmb.(vm.WindowsConfigManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping cpu group setup") + } + if err := windows.SetCPUGroup(ctx, opts.CPUGroupID); err != nil { + return nil, err + } + } + + mem, ok := uvmb.(vm.MemoryManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping memory setup") + } + + if err := mem.SetMemoryLimit(ctx, memorySizeInMB); err != nil { + return nil, errors.Wrap(err, "failed to set memory limit") + } + + backingType := vm.MemoryBackingTypeVirtual + if !opts.AllowOvercommit { + backingType = vm.MemoryBackingTypePhysical + } + + if err := mem.SetMemoryConfig(&vm.MemoryConfig{ + BackingType: backingType, + DeferredCommit: opts.EnableDeferredCommit, + ColdDiscardHint: opts.EnableColdDiscardHint, + HotHint: opts.AllowOvercommit, + }); err != nil { + return nil, errors.Wrap(err, "failed to set memory config") + } + + proc, ok := uvmb.(vm.ProcessorManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping processor setup") + } + + if err := proc.SetProcessorCount(uint32(uvm.processorCount)); err != nil { + return nil, errors.Wrap(err, "failed to set processor count") } // Handle StorageQoS if set if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 { - doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ - IopsMaximum: opts.StorageQoSIopsMaximum, - BandwidthMaximum: opts.StorageQoSBandwidthMaximum, + storage, ok := uvmb.(vm.StorageQosManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping storageqos setup") + } + if err := storage.SetStorageQos(int64(opts.StorageQoSIopsMaximum), int64(opts.StorageQoSBandwidthMaximum)); err != nil { + return nil, errors.Wrap(err, "failed to set storage qos config") } } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. 
- doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { - Attachments: make(map[string]hcsschema.Attachment), - }, + scsi, ok := uvmb.(vm.SCSIManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping SCSI setup") + } + for i := 0; i < int(uvm.scsiControllerCount); i++ { + if err := scsi.AddSCSIController(uint32(i)); err != nil { + return nil, errors.Wrap(err, "failed to add scsi controller") + } } } + if uvm.vpmemMaxCount > 0 { - doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ - MaximumCount: uvm.vpmemMaxCount, - MaximumSizeBytes: uvm.vpmemMaxSizeBytes, + vpmem, ok := uvmb.(vm.VPMemManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping VPMem setup") + } + if err := vpmem.AddVPMemController(uvm.vpmemMaxCount, uvm.vpmemMaxSizeBytes); err != nil { + return nil, errors.Wrap(err, "failed to add VPMem controller") } } @@ -280,17 +315,19 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error case PreferredRootFSTypeVHD: // Support for VPMem VHD(X) booting rather than initrd.. 
kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" - imageFormat := "Vhd1" + imageFormat := vm.VPMemImageFormatVHD1 if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { - imageFormat = "Vhdx" + imageFormat = vm.VPMemImageFormatVHDX } - doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ - "0": { - HostPath: rootfsFullPath, - ReadOnly: true, - ImageFormat: imageFormat, - }, + + vpmem, ok := uvmb.(vm.VPMemManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping VPMem setup") + } + if err := vpmem.AddVPMemDevice(ctx, 0, rootfsFullPath, true, imageFormat); err != nil { + return nil, errors.Wrap(err, "failed to add vpmem disk") } + // Add to our internal structure uvm.vpmemDevices[0] = &vpmemInfo{ hostPath: opts.RootFSFile, @@ -303,23 +340,19 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error if opts.ConsolePipe != "" { vmDebugging = true kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200" - doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{ - "0": { // Which is actually COM1 - NamedPipe: opts.ConsolePipe, - }, + + serial, ok := uvm.vm.(vm.SerialManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping serial console setup") + } + + if err := serial.SetSerialConsole(0, opts.ConsolePipe); err != nil { + return nil, errors.Wrap(err, "failed to add serial console config") } } else { kernelArgs += " 8250_core.nr_uarts=0" } - if opts.EnableGraphicsConsole { - vmDebugging = true - kernelArgs += " console=tty" - doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{} - doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{} - doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{} - } - if !vmDebugging { // Terminate the VM if there is a kernel panic. 
kernelArgs += " panic=-1 quiet" @@ -358,32 +391,31 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error kernelArgs += fmt.Sprintf(" nr_cpus=%d", opts.ProcessorCount) kernelArgs += ` brd.rd_nr=0 pmtmr=0 -- ` + initArgs - if !opts.KernelDirect { - doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{ - BootThis: &hcsschema.UefiBootEntry{ - DevicePath: `\` + opts.KernelFile, - DeviceType: "VmbFs", - VmbFsRootPath: opts.BootFilesPath, - OptionalData: kernelArgs, - }, + boot, ok := uvmb.(vm.BootManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping boot configuration") + } + if opts.KernelDirect { + var initFS string + if opts.PreferredRootFSType == PreferredRootFSTypeInitRd { + initFS = rootfsFullPath } - } else { - doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{ - KernelFilePath: kernelFullPath, - KernelCmdLine: kernelArgs, + if err := boot.SetLinuxKernelDirectBoot(kernelFullPath, initFS, kernelArgs); err != nil { + return nil, errors.Wrap(err, "failed to set Linux kernel direct boot") } - if opts.PreferredRootFSType == PreferredRootFSTypeInitRd { - doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath + } else { + if err := boot.SetUEFIBoot(opts.BootFilesPath, opts.KernelFile, kernelArgs); err != nil { + return nil, errors.Wrap(err, "failed to set UEFI boot") } } - err = uvm.create(ctx, doc) + uvm.vm, err = uvmb.Create(ctx, cOpts) if err != nil { - return nil, fmt.Errorf("error while creating the compute system: %s", err) + return nil, errors.Wrap(err, "failed to create virtual machine") } // Cerate a socket to inject entropy during boot. 
- uvm.entropyListener, err = uvm.listenVsock(entropyVsockPort) + uvm.entropyListener, err = uvm.listenVsock(ctx, entropyVsockPort) if err != nil { return nil, err } @@ -393,15 +425,15 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error if opts.ForwardStdout || opts.ForwardStderr { uvm.outputHandler = opts.OutputHandler uvm.outputProcessingDone = make(chan struct{}) - uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort) + uvm.outputListener, err = uvm.listenVsock(ctx, linuxLogVsockPort) if err != nil { return nil, err } } if opts.UseGuestConnection { - log.G(ctx).WithField("vmID", uvm.runtimeID).Debug("Using external GCS bridge") - l, err := uvm.listenVsock(gcs.LinuxGcsVsockPort) + log.G(ctx).WithField("vmID", uvm.vm.VmID()).Debug("Using external GCS bridge") + l, err := uvm.listenVsock(ctx, gcs.LinuxGcsVsockPort) if err != nil { return nil, err } @@ -421,9 +453,10 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error return uvm, nil } -func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) { - return winio.ListenHvsock(&winio.HvsockAddr{ - VMID: uvm.runtimeID, - ServiceID: winio.VsockServiceID(port), - }) +func (uvm *UtilityVM) listenVsock(ctx context.Context, port uint32) (net.Listener, error) { + vmsocket, ok := uvm.vm.(vm.VMSocketManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping vm socket configuration") + } + return vmsocket.VMSocketListen(ctx, vm.HvSocket, winio.VsockServiceID(port)) } diff --git a/internal/uvm/create_wcow.go b/internal/uvm/create_wcow.go index 439b290ee7..f772f1e95d 100644 --- a/internal/uvm/create_wcow.go +++ b/internal/uvm/create_wcow.go @@ -9,14 +9,15 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/gcs" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" 
"github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" - "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/uvmfolder" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/Microsoft/hcsshim/internal/vm/hcs" + "github.com/Microsoft/hcsshim/internal/vm/remotevm" "github.com/Microsoft/hcsshim/internal/wclayer" "github.com/Microsoft/hcsshim/internal/wcow" "github.com/Microsoft/hcsshim/osversion" @@ -65,12 +66,14 @@ func NewDefaultOptionsWCOW(id, owner string) *OptionsWCOW { } func (uvm *UtilityVM) startExternalGcsListener(ctx context.Context) error { - log.G(ctx).WithField("vmID", uvm.runtimeID).Debug("Using external GCS bridge") + log.G(ctx).WithField("vmID", uvm.vm.VmID()).Debug("Using external GCS bridge") - l, err := winio.ListenHvsock(&winio.HvsockAddr{ - VMID: uvm.runtimeID, - ServiceID: gcs.WindowsGcsHvsockServiceID, - }) + vmsocket, ok := uvm.vm.(vm.VMSocketManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping vm socket operation") + } + + l, err := vmsocket.VMSocketListen(ctx, vm.HvSocket, gcs.WindowsGcsHvsockServiceID) if err != nil { return err } @@ -78,10 +81,10 @@ func (uvm *UtilityVM) startExternalGcsListener(ctx context.Context) error { return nil } -func prepareConfigDoc(ctx context.Context, uvm *UtilityVM, opts *OptionsWCOW, uvmFolder string) (*hcsschema.ComputeSystem, error) { +func prepareConfigDoc(ctx context.Context, uvm *UtilityVM, opts *OptionsWCOW, uvmFolder string) error { processorTopology, err := processorinfo.HostProcessorInfo(ctx) if err != nil { - return nil, fmt.Errorf("failed to get host processor information: %s", err) + return errors.Wrap(err, "failed to get host processor information") } // To maintain compatability with Docker we need to automatically downgrade @@ -91,109 +94,99 @@ func prepareConfigDoc(ctx context.Context, uvm 
*UtilityVM, opts *OptionsWCOW, uv // Align the requested memory size. memorySizeInMB := uvm.normalizeMemorySize(ctx, opts.MemorySizeInMB) + mem, ok := uvm.builder.(vm.MemoryManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping memory setup") + } + + if err := mem.SetMemoryLimit(ctx, memorySizeInMB); err != nil { + return errors.Wrap(err, "failed to set memory limit") + } + + backingType := vm.MemoryBackingTypeVirtual + if !opts.AllowOvercommit { + backingType = vm.MemoryBackingTypePhysical + } + + if err := mem.SetMemoryConfig(&vm.MemoryConfig{ + BackingType: backingType, + DeferredCommit: opts.EnableDeferredCommit, + HotHint: opts.AllowOvercommit, + }); err != nil { + return errors.Wrap(err, "failed to set memory config") + } + + vsmb, ok := uvm.builder.(vm.VSMBManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping VSMB operation") + } // UVM rootfs share is readonly. vsmbOpts := uvm.DefaultVSMBOptions(true) vsmbOpts.TakeBackupPrivilege = true - virtualSMB := &hcsschema.VirtualSmb{ - DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere - Shares: []hcsschema.VirtualSmbShare{ - { - Name: "os", - Path: filepath.Join(uvmFolder, `UtilityVM\Files`), - Options: vsmbOpts, - }, - }, + if opts.IsTemplate { + uvm.SetSaveableVSMBOptions(vsmbOpts, vsmbOpts.ReadOnly) } - // Here for a temporary workaround until the need for setting this regkey is no more. To protect - // against any undesired behavior (such as some general networking scenarios ceasing to function) - // with a recent change to fix SMB share access in the UVM, this registry key will be checked to - // enable the change in question inside GNS.dll. 
- var registryChanges hcsschema.RegistryChanges - if !opts.DisableCompartmentNamespace { - registryChanges = hcsschema.RegistryChanges{ - AddValues: []hcsschema.RegistryValue{ - { - Key: &hcsschema.RegistryKey{ - Hive: "System", - Name: "CurrentControlSet\\Services\\gns", - }, - Name: "EnableCompartmentNamespace", - DWordValue: 1, - Type_: "DWord", - }, - }, - } + if err := vsmb.AddVSMB( + ctx, + filepath.Join(uvmFolder, `UtilityVM\Files`), + "os", + nil, + vsmbOpts, + ); err != nil { + return errors.Wrap(err, "failed to set VSMB share on UVM document") + } + + cpu, ok := uvm.builder.(vm.ProcessorManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping cpu operation") } - processor := &hcsschema.Processor2{ - Count: uvm.processorCount, - Limit: opts.ProcessorLimit, - Weight: opts.ProcessorWeight, + limits := &vm.ProcessorLimits{ + Limit: uint64(opts.ProcessorLimit), + Weight: uint64(opts.ProcessorWeight), } + if err := cpu.SetProcessorLimits(ctx, limits); err != nil { + return errors.Wrap(err, "failed to set processor limit on UVM document") + } + if err := cpu.SetProcessorCount(uint32(uvm.processorCount)); err != nil { + return errors.Wrap(err, "failed to set processor count on UVM document") + } + // We can set a cpu group for the VM at creation time in recent builds. 
if opts.CPUGroupID != "" { if osversion.Build() < cpuGroupCreateBuild { - return nil, errCPUGroupCreateNotSupported + return errCPUGroupCreateNotSupported + } + windows, ok := uvm.builder.(vm.WindowsConfigManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping cpu groups operation") + } + if err := windows.SetCPUGroup(ctx, opts.CPUGroupID); err != nil { + return err } - processor.CpuGroup = &hcsschema.CpuGroup{Id: opts.CPUGroupID} } - doc := &hcsschema.ComputeSystem{ - Owner: uvm.owner, - SchemaVersion: schemaversion.SchemaV21(), - ShouldTerminateOnLastHandleClosed: true, - VirtualMachine: &hcsschema.VirtualMachine{ - StopOnReset: true, - Chipset: &hcsschema.Chipset{ - Uefi: &hcsschema.Uefi{ - BootThis: &hcsschema.UefiBootEntry{ - DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`, - DeviceType: "VmbFs", - }, - }, - }, - RegistryChanges: ®istryChanges, - ComputeTopology: &hcsschema.Topology{ - Memory: &hcsschema.Memory2{ - SizeInMB: memorySizeInMB, - AllowOvercommit: opts.AllowOvercommit, - // EnableHotHint is not compatible with physical. - EnableHotHint: opts.AllowOvercommit, - EnableDeferredCommit: opts.EnableDeferredCommit, - LowMMIOGapInMB: opts.LowMMIOGapInMB, - HighMMIOBaseInMB: opts.HighMMIOBaseInMB, - HighMMIOGapInMB: opts.HighMMIOGapInMB, - }, - Processor: processor, - }, - Devices: &hcsschema.Devices{ - HvSocket: &hcsschema.HvSocket2{ - HvSocketConfig: &hcsschema.HvSocketSystemConfig{ - // Allow administrators and SYSTEM to bind to vsock sockets - // so that we can create a GCS log socket. 
- DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", - }, - }, - VirtualSmb: virtualSMB, - }, - }, + storage, ok := uvm.builder.(vm.StorageQosManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping storage qos operation") } // Handle StorageQoS if set if opts.StorageQoSBandwidthMaximum > 0 || opts.StorageQoSIopsMaximum > 0 { - doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ - IopsMaximum: opts.StorageQoSIopsMaximum, - BandwidthMaximum: opts.StorageQoSBandwidthMaximum, + if err := storage.SetStorageQos( + int64(opts.StorageQoSIopsMaximum), + int64(opts.StorageQoSBandwidthMaximum), + ); err != nil { + return err } } - return doc, nil + return nil } -// CreateWCOW creates an HCS compute system representing a utility VM. -// The HCS Compute system can either be created from scratch or can be cloned from a -// template. +// CreateWCOW creates a Windows utility VM. +// The UVM can either be created from scratch or can be cloned from a template. // // WCOW Notes: // - The scratch is always attached to SCSI 0:0 @@ -234,6 +227,28 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error } }() + var ( + uvmb vm.UVMBuilder + cOpts []vm.CreateOpt + ) + switch opts.VMSource { + case vm.HCS: + uvmb, err = hcs.NewUVMBuilder(uvm.id, uvm.owner, vm.Windows) + if err != nil { + return nil, errors.Wrap(err, "failed to create UVM builder") + } + cOpts = applyHcsOpts(opts) + case vm.RemoteVM: + uvmb, err = remotevm.NewUVMBuilder(ctx, uvm.id, uvm.owner, opts.VMServicePath, opts.VMServiceAddress, vm.Windows) + if err != nil { + return nil, errors.Wrap(err, "failed to create UVM builder") + } + cOpts = applyRemoteVMOpts(opts.Options) + default: + return nil, fmt.Errorf("unknown VM source: %s", opts.VMSource) + } + uvm.builder = uvmb + if err := verifyOptions(ctx, opts); err != nil { return nil, errors.Wrap(err, errBadUVMOpts.Error()) } @@ -258,9 +273,8 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err 
error } } - doc, err := prepareConfigDoc(ctx, uvm, opts, uvmFolder) - if err != nil { - return nil, fmt.Errorf("error in preparing config doc: %s", err) + if err := prepareConfigDoc(ctx, uvm, opts, uvmFolder); err != nil { + return nil, errors.Wrap(err, "error in preparing config doc") } if !opts.IsClone { @@ -277,30 +291,27 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error } } - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { - Attachments: map[string]hcsschema.Attachment{ - "0": { - Path: scratchPath, - Type_: "VirtualDisk", - }, - }, - }, + scsi, ok := uvm.builder.(vm.SCSIManager) + if !ok { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping scsi operation") + } + if err := scsi.AddSCSIController(0); err != nil { + return nil, err + } + if err := scsi.AddSCSIDisk(ctx, 0, 0, scratchPath, vm.SCSIDiskTypeVHDX, false); err != nil { + return nil, err } - uvm.scsiLocations[0][0] = newSCSIMount(uvm, doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path, "", "", 1, 0, 0, false) + uvm.scsiLocations[0][0] = newSCSIMount(uvm, scratchPath, "", "", 1, 0, 0, false) } else { - doc.VirtualMachine.RestoreState = &hcsschema.RestoreState{} - doc.VirtualMachine.RestoreState.TemplateSystemId = opts.TemplateConfig.UVMID - for _, cloneableResource := range opts.TemplateConfig.Resources { err = cloneableResource.Clone(ctx, uvm, &cloneData{ - doc: doc, + builder: uvmb, scratchFolder: scratchFolder, uvmID: opts.ID, }) if err != nil { - return nil, fmt.Errorf("failed while cloning: %s", err) + return nil, errors.Wrap(err, "failed while cloning resource") } } @@ -314,18 +325,11 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error uvm.IsClone = true uvm.TemplateID = opts.TemplateConfig.UVMID } + uvm.IsTemplate = opts.IsTemplate - // Add appropriate VSMB share options if this UVM needs to be saved as a template - if opts.IsTemplate { - for _, share := range 
doc.VirtualMachine.Devices.VirtualSmb.Shares { - uvm.SetSaveableVSMBOptions(share.Options, share.Options.ReadOnly) - } - uvm.IsTemplate = true - } - - err = uvm.create(ctx, doc) + uvm.vm, err = uvmb.Create(ctx, cOpts) if err != nil { - return nil, fmt.Errorf("error while creating the compute system: %s", err) + return nil, errors.Wrap(err, "error while creating the Utility VM") } if err = uvm.startExternalGcsListener(ctx); err != nil { diff --git a/internal/uvm/guest_request.go b/internal/uvm/guest_request.go index 5459859453..06fdbaf503 100644 --- a/internal/uvm/guest_request.go +++ b/internal/uvm/guest_request.go @@ -3,13 +3,13 @@ package uvm import ( "context" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/pkg/errors" ) -// GuestRequest send an arbitrary guest request to the UVM. +// GuestRequest sends an arbitrary guest request to the UVM. func (uvm *UtilityVM) GuestRequest(ctx context.Context, guestReq interface{}) error { - msr := &hcsschema.ModifySettingRequest{ - GuestRequest: guestReq, + if err := uvm.gc.Modify(ctx, guestReq); err != nil { + return errors.Wrap(err, "guest modify request failed") } - return uvm.modify(ctx, msr) + return nil } diff --git a/internal/uvm/hvsocket.go b/internal/uvm/hvsocket.go index 48f7730761..ab444eeb16 100644 --- a/internal/uvm/hvsocket.go +++ b/internal/uvm/hvsocket.go @@ -2,17 +2,14 @@ package uvm import ( "context" - "fmt" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" ) -// UpdateHvSocketService calls HCS to update/create the hvsocket service for -// the UVM. Takes in a service ID and the hvsocket service configuration. If there is no -// entry for the service ID already it will be created. 
The same call on HvSockets side -// handles the Create/Update/Delete cases based on what is passed in. Here is the logic +// UpdateHvSocketService updates/creates the hvsocket service for the UVM. Takes in a service ID and +// the hvsocket service configuration. If there is no entry for the service ID already it will be created. +// The same call on HvSockets side handles the Create/Update/Delete cases based on what is passed in. Here is the logic // for the call. // // 1. If the service ID does not currently exist in the service table, it will be created @@ -23,22 +20,11 @@ import ( // service. // // If the request is crafted with Disabled = True and empty descriptors, then this function -// will behave identically to a call to RemoveHvSocketService. Prefer RemoveHvSocketService for this -// behavior as the relevant fields are set on HCS' side. -func (uvm *UtilityVM) UpdateHvSocketService(ctx context.Context, sid string, doc *hcsschema.HvSocketServiceConfig) error { - request := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Update, - ResourcePath: fmt.Sprintf(resourcepaths.HvSocketConfigResourceFormat, sid), - Settings: doc, +// will remove the hvsocket service entry. +func (uvm *UtilityVM) UpdateHvSocketService(ctx context.Context, sid string, doc *vm.HvSocketServiceConfig) error { + vmsocket, ok := uvm.vm.(vm.VMSocketManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping vmsocket operation") } - return uvm.modify(ctx, request) -} - -// RemoveHvSocketService will remove an hvsocket service entry if it exists. 
-func (uvm *UtilityVM) RemoveHvSocketService(ctx context.Context, sid string) error { - request := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.HvSocketConfigResourceFormat, sid), - } - return uvm.modify(ctx, request) + return vmsocket.UpdateVMSocket(ctx, vm.HvSocket, sid, doc) } diff --git a/internal/uvm/memory_update.go b/internal/uvm/memory_update.go index 058ffff013..f4ac8392f4 100644 --- a/internal/uvm/memory_update.go +++ b/internal/uvm/memory_update.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" ) const ( @@ -19,26 +19,27 @@ const ( func (uvm *UtilityVM) UpdateMemory(ctx context.Context, sizeInBytes uint64) error { requestedSizeInMB := sizeInBytes / bytesPerMB actual := uvm.normalizeMemorySize(ctx, requestedSizeInMB) - req := &hcsschema.ModifySettingRequest{ - ResourcePath: resourcepaths.MemoryResourcePath, - Settings: actual, + mem, ok := uvm.vm.(vm.MemoryManager) + if !ok || !uvm.vm.Supported(vm.Memory, vm.Update) { + return errors.Wrap(vm.ErrNotSupported, "stopping update of memory") } - return uvm.modify(ctx, req) + return mem.SetMemoryLimit(ctx, actual) } // GetAssignedMemoryInBytes returns the amount of assigned memory for the UVM in bytes func (uvm *UtilityVM) GetAssignedMemoryInBytes(ctx context.Context) (uint64, error) { - props, err := uvm.hcsSystem.PropertiesV2(ctx, hcsschema.PTMemory) + stats, err := uvm.vm.Stats(ctx) if err != nil { - return 0, err + return 0, errors.Wrap(err, "failed to fetch Utility VM stats") } - if props.Memory == nil { + if stats.Memory == nil { return 0, fmt.Errorf("no memory properties returned for system %s", uvm.id) } - if props.Memory.VirtualMachineMemory == nil { + + if stats.Memory.VmMemory == nil { return 0, fmt.Errorf("no virtual memory properties returned for 
system %s", uvm.id) } - pages := props.Memory.VirtualMachineMemory.AssignedMemory + pages := stats.Memory.VmMemory.AssignedMemory if pages == 0 { return 0, fmt.Errorf("assigned memory returned should not be 0 for system %s", uvm.id) } diff --git a/internal/uvm/modify.go b/internal/uvm/modify.go index 72b391756c..15e747d1a9 100644 --- a/internal/uvm/modify.go +++ b/internal/uvm/modify.go @@ -1,48 +1 @@ package uvm - -import ( - "context" - "fmt" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/requesttype" -) - -// Modify modifies the compute system by sending a request to HCS. -func (uvm *UtilityVM) modify(ctx context.Context, doc *hcsschema.ModifySettingRequest) (err error) { - if doc.GuestRequest == nil || uvm.gc == nil { - return uvm.hcsSystem.Modify(ctx, doc) - } - - hostdoc := *doc - hostdoc.GuestRequest = nil - if doc.ResourcePath != "" && doc.RequestType == requesttype.Add { - err = uvm.hcsSystem.Modify(ctx, &hostdoc) - if err != nil { - return fmt.Errorf("adding VM resources: %s", err) - } - defer func() { - if err != nil { - hostdoc.RequestType = requesttype.Remove - rerr := uvm.hcsSystem.Modify(ctx, &hostdoc) - if rerr != nil { - log.G(ctx).WithError(rerr).Error("failed to roll back resource add") - } - } - }() - } - err = uvm.gc.Modify(ctx, doc.GuestRequest) - if err != nil { - return fmt.Errorf("guest modify: %s", err) - } - if doc.ResourcePath != "" && doc.RequestType == requesttype.Remove { - err = uvm.hcsSystem.Modify(ctx, &hostdoc) - if err != nil { - err = fmt.Errorf("removing VM resources: %s", err) - log.G(ctx).WithError(err).Error("failed to remove host resources after successful guest request") - return err - } - } - return nil -} diff --git a/internal/uvm/network.go b/internal/uvm/network.go index fcec86de66..42526d53b7 100644 --- a/internal/uvm/network.go +++ b/internal/uvm/network.go @@ -2,16 +2,16 @@ package uvm import ( "context" - 
"errors" "fmt" "os" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/hcn" "github.com/Microsoft/hcsshim/internal/guestrequest" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/hns" "github.com/Microsoft/hcsshim/internal/log" @@ -319,14 +319,12 @@ func (uvm *UtilityVM) AddNetNS(ctx context.Context, hcnNamespace *hcn.HostComput // Add a Guest Network namespace. On LCOW we add the adapters // dynamically. if uvm.operatingSystem == "windows" { - guestNamespace := hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeNetworkNamespace, - RequestType: requesttype.Add, - Settings: hcnNamespace, - }, + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetworkNamespace, + RequestType: requesttype.Add, + Settings: hcnNamespace, } - if err := uvm.modify(ctx, &guestNamespace); err != nil { + if err := uvm.GuestRequest(ctx, guestReq); err != nil { return err } } @@ -350,11 +348,7 @@ func (uvm *UtilityVM) AddNetNSByID(ctx context.Context, id string) error { if err != nil { return err } - - if err = uvm.AddNetNS(ctx, hcnNamespace); err != nil { - return err - } - return nil + return uvm.AddNetNS(ctx, hcnNamespace) } // AddEndpointToNSWithID adds an endpoint to the network namespace with the specified @@ -440,14 +434,12 @@ func (uvm *UtilityVM) RemoveNetNS(ctx context.Context, id string) error { if err != nil { return err } - guestNamespace := hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeNetworkNamespace, - RequestType: requesttype.Remove, - Settings: hcnNamespace, - }, + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetworkNamespace, 
+ RequestType: requesttype.Remove, + Settings: hcnNamespace, } - if err := uvm.modify(ctx, &guestNamespace); err != nil { + if err := uvm.GuestRequest(ctx, guestReq); err != nil { return err } } @@ -529,35 +521,24 @@ func getNetworkModifyRequest(adapterID string, requestType string, settings inte // addNIC adds a nic to the Utility VM. func (uvm *UtilityVM) addNIC(ctx context.Context, id string, endpoint *hns.HNSEndpoint) error { + var guestReq guestrequest.GuestRequest // First a pre-add. This is a guest-only request and is only done on Windows. if uvm.operatingSystem == "windows" { - preAddRequest := hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeNetwork, - RequestType: requesttype.Add, - Settings: getNetworkModifyRequest( - id, - requesttype.PreAdd, - endpoint), - }, + preAddReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetwork, + RequestType: requesttype.Add, + Settings: getNetworkModifyRequest( + id, + requesttype.PreAdd, + endpoint), } - if err := uvm.modify(ctx, &preAddRequest); err != nil { - return err + if err := uvm.GuestRequest(ctx, preAddReq); err != nil { + return errors.Wrap(err, "failed pre-add for network adapter") } } - // Then the Add itself - request := hcsschema.ModifySettingRequest{ - RequestType: requesttype.Add, - ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, id), - Settings: hcsschema.NetworkAdapter{ - EndpointId: endpoint.Id, - MacAddress: endpoint.MacAddress, - }, - } - if uvm.operatingSystem == "windows" { - request.GuestRequest = guestrequest.GuestRequest{ + guestReq = guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeNetwork, RequestType: requesttype.Add, Settings: getNetworkModifyRequest( @@ -568,7 +549,7 @@ func (uvm *UtilityVM) addNIC(ctx context.Context, id string, endpoint *hns.HNSEn } else { // Verify this version of LCOW supports Network HotAdd if uvm.isNetworkNamespaceSupported() { - 
request.GuestRequest = guestrequest.GuestRequest{ + guestReq = guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeNetwork, RequestType: requesttype.Add, Settings: &guestrequest.LCOWNetworkAdapter{ @@ -587,25 +568,20 @@ func (uvm *UtilityVM) addNIC(ctx context.Context, id string, endpoint *hns.HNSEn } } - if err := uvm.modify(ctx, &request); err != nil { - return err + network, ok := uvm.vm.(vm.NetworkManager) + if !ok || !uvm.vm.Supported(vm.Network, vm.Add) { + return errors.Wrap(vm.ErrNotSupported, "stopping network adapter add") } - - return nil + if err := network.AddNIC(ctx, id, endpoint.Id, endpoint.MacAddress); err != nil { + return errors.Wrap(err, "failed to add NIC to Utility VM") + } + return uvm.GuestRequest(ctx, guestReq) } func (uvm *UtilityVM) removeNIC(ctx context.Context, id string, endpoint *hns.HNSEndpoint) error { - request := hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, id), - Settings: hcsschema.NetworkAdapter{ - EndpointId: endpoint.Id, - MacAddress: endpoint.MacAddress, - }, - } - + var guestReq interface{} if uvm.operatingSystem == "windows" { - request.GuestRequest = hcsschema.ModifySettingRequest{ + guestReq = hcsschema.ModifySettingRequest{ RequestType: requesttype.Remove, Settings: getNetworkModifyRequest( id, @@ -615,7 +591,7 @@ func (uvm *UtilityVM) removeNIC(ctx context.Context, id string, endpoint *hns.HN } else { // Verify this version of LCOW supports Network HotRemove if uvm.isNetworkNamespaceSupported() { - request.GuestRequest = guestrequest.GuestRequest{ + guestReq = guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeNetwork, RequestType: requesttype.Remove, Settings: &guestrequest.LCOWNetworkAdapter{ @@ -626,8 +602,16 @@ func (uvm *UtilityVM) removeNIC(ctx context.Context, id string, endpoint *hns.HN } } - if err := uvm.modify(ctx, &request); err != nil { - return err + if err := uvm.GuestRequest(ctx, 
guestReq); err != nil { + return errors.Wrap(err, "failed to remove NIC from the guest") + } + + network, ok := uvm.vm.(vm.NetworkManager) + if !ok || !uvm.vm.Supported(vm.Network, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping network adapter removal") + } + if err := network.RemoveNIC(ctx, id, endpoint.Id, endpoint.MacAddress); err != nil { + return errors.Wrap(err, "failed to remove NIC from Utility VM") } return nil } @@ -645,11 +629,13 @@ func (uvm *UtilityVM) RemoveAllNICs(ctx context.Context) error { } // UpdateNIC updates a UVM's network adapter. -func (uvm *UtilityVM) UpdateNIC(ctx context.Context, id string, settings *hcsschema.NetworkAdapter) error { - req := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Update, - ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, id), - Settings: settings, +func (uvm *UtilityVM) UpdateNIC(ctx context.Context, id string, settings *vm.NetworkAdapter) error { + network, ok := uvm.vm.(vm.NetworkManager) + if !ok || !uvm.vm.Supported(vm.Network, vm.Update) { + return errors.Wrap(vm.ErrNotSupported, "stopping network adapter update") + } + if err := network.UpdateNIC(ctx, id, settings); err != nil { + return errors.Wrap(err, "failed to update NIC on Utility VM") } - return uvm.modify(ctx, req) + return nil } diff --git a/internal/uvm/pipes.go b/internal/uvm/pipes.go index ba08aedbdb..18ebd316ce 100644 --- a/internal/uvm/pipes.go +++ b/internal/uvm/pipes.go @@ -5,10 +5,9 @@ import ( "context" "fmt" "strings" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" ) const pipePrefix = `\\.\pipe\` @@ -30,24 +29,24 @@ func (pipe *PipeMount) Release(ctx context.Context) error { // AddPipe shares a named pipe into the UVM. 
func (uvm *UtilityVM) AddPipe(ctx context.Context, hostPath string) (*PipeMount, error) { - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Add, - ResourcePath: fmt.Sprintf(resourcepaths.MappedPipeResourceFormat, hostPath), + pipe, ok := uvm.vm.(vm.PipeManager) + if !ok || !uvm.vm.Supported(vm.Pipe, vm.Add) { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping pipe mount add") } - if err := uvm.modify(ctx, modification); err != nil { - return nil, err + if err := pipe.AddPipe(ctx, hostPath); err != nil { + return nil, errors.Wrap(err, "failed to add pipe mount") } return &PipeMount{uvm, hostPath}, nil } // RemovePipe removes a shared named pipe from the UVM. func (uvm *UtilityVM) RemovePipe(ctx context.Context, hostPath string) error { - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.MappedPipeResourceFormat, hostPath), + pipe, ok := uvm.vm.(vm.PipeManager) + if !ok || !uvm.vm.Supported(vm.Pipe, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping pipe mount removal") } - if err := uvm.modify(ctx, modification); err != nil { - return err + if err := pipe.RemovePipe(ctx, hostPath); err != nil { + return errors.Wrap(err, "failed to remove pipe mount") } return nil } diff --git a/internal/uvm/plan9.go b/internal/uvm/plan9.go index 755fbdb731..46baad02b7 100644 --- a/internal/uvm/plan9.go +++ b/internal/uvm/plan9.go @@ -2,15 +2,14 @@ package uvm import ( "context" - "errors" "fmt" "strconv" "github.com/Microsoft/hcsshim/internal/guestrequest" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" ) // Plan9Share is a struct containing host paths for the UVM @@ -23,7 +22,7 @@ type Plan9Share struct { // Release frees the 
resources of the corresponding Plan9 share func (p9 *Plan9Share) Release(ctx context.Context) error { if err := p9.vm.RemovePlan9(ctx, p9); err != nil { - return fmt.Errorf("failed to remove plan9 share: %s", err) + return errors.Wrap(err, "failed to remove plan9 share") } return nil } @@ -68,30 +67,27 @@ func (uvm *UtilityVM) AddPlan9(ctx context.Context, hostPath string, uvmPath str uvm.m.Unlock() name := strconv.FormatUint(index, 10) - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Add, - Settings: hcsschema.Plan9Share{ - Name: name, - AccessName: name, - Path: hostPath, - Port: plan9Port, - Flags: flags, - AllowedFiles: allowedNames, - }, - ResourcePath: resourcepaths.Plan9ShareResourcePath, - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeMappedDirectory, - RequestType: requesttype.Add, - Settings: guestrequest.LCOWMappedDirectory{ - MountPath: uvmPath, - ShareName: name, - Port: plan9Port, - ReadOnly: readOnly, - }, + plan9, ok := uvm.vm.(vm.Plan9Manager) + if !ok || !uvm.vm.Supported(vm.Plan9, vm.Add) { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping plan 9 share add") + } + + if err := plan9.AddPlan9(ctx, hostPath, name, plan9Port, flags, allowedNames); err != nil { + return nil, errors.Wrap(err, "failed to add plan 9 share") + } + + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedDirectory, + RequestType: requesttype.Add, + Settings: guestrequest.LCOWMappedDirectory{ + MountPath: uvmPath, + ShareName: name, + Port: plan9Port, + ReadOnly: readOnly, }, } - if err := uvm.modify(ctx, modification); err != nil { + if err := uvm.GuestRequest(ctx, guestReq); err != nil { return nil, err } @@ -109,26 +105,26 @@ func (uvm *UtilityVM) RemovePlan9(ctx context.Context, share *Plan9Share) error return errNotSupported } - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - Settings: hcsschema.Plan9Share{ - Name: share.name, - 
AccessName: share.name, - Port: plan9Port, - }, - ResourcePath: resourcepaths.Plan9ShareResourcePath, - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeMappedDirectory, - RequestType: requesttype.Remove, - Settings: guestrequest.LCOWMappedDirectory{ - MountPath: share.uvmPath, - ShareName: share.name, - Port: plan9Port, - }, + plan9, ok := uvm.vm.(vm.Plan9Manager) + if !ok || !uvm.vm.Supported(vm.Plan9, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping plan 9 share removal") + } + + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedDirectory, + RequestType: requesttype.Remove, + Settings: guestrequest.LCOWMappedDirectory{ + MountPath: share.uvmPath, + ShareName: share.name, + Port: plan9Port, }, } - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("failed to remove plan9 share %s from %s: %+v: %s", share.name, uvm.id, modification, err) + + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return fmt.Errorf("failed to remove plan9 share %s from %s: %+v: %s", share.name, uvm.id, guestReq, err) + } + if err := plan9.RemovePlan9(ctx, share.name, plan9Port); err != nil { + return errors.Wrap(err, "failed to remove plan 9 share") } return nil } diff --git a/internal/uvm/scsi.go b/internal/uvm/scsi.go index fdb9c8bc13..d44378ed75 100644 --- a/internal/uvm/scsi.go +++ b/internal/uvm/scsi.go @@ -8,15 +8,13 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "github.com/Microsoft/go-winio/pkg/security" "github.com/Microsoft/hcsshim/internal/copyfile" "github.com/Microsoft/hcsshim/internal/guestrequest" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/wclayer" "github.com/pkg/errors" 
"github.com/sirupsen/logrus" @@ -172,11 +170,7 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { return nil } - scsiModification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), - } - + var guestReq guestrequest.GuestRequest // Include the GuestRequest so that the GCS ejects the disk cleanly if the // disk was attached/mounted // @@ -184,7 +178,7 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { // so that we synchronize the guest state. This seems to always avoid SCSI // related errors if this index quickly reused by another container. if uvm.operatingSystem == "windows" && sm.UVMPath != "" { - scsiModification.GuestRequest = guestrequest.GuestRequest{ + guestReq = guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, RequestType: requesttype.Remove, Settings: guestrequest.WCOWMappedVirtualDisk{ @@ -193,7 +187,7 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { }, } } else { - scsiModification.GuestRequest = guestrequest.GuestRequest{ + guestReq = guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, RequestType: requesttype.Remove, Settings: guestrequest.LCOWMappedVirtualDisk{ @@ -204,9 +198,17 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { } } - if err := uvm.modify(ctx, scsiModification); err != nil { - return fmt.Errorf("failed to remove SCSI disk %s from container %s: %s", hostPath, uvm.id, err) + scsi, ok := uvm.vm.(vm.SCSIManager) + if !ok || !uvm.vm.Supported(vm.SCSI, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping SCSI disk removal") + } + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return errors.Wrap(err, "failed guest request to remove SCSI disk") } + if err := scsi.RemoveSCSIDisk(ctx, uint32(sm.Controller),
uint32(sm.LUN), hostPath); err != nil { + return errors.Wrap(err, "failed to remove SCSI disk") + } + log.G(ctx).WithFields(sm.logFormat()).Debug("removed SCSI location") uvm.scsiLocations[sm.Controller][sm.LUN] = nil return nil @@ -287,18 +289,9 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, atta return nil, ErrTooManyAttachments } - SCSIModification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Add, - Settings: hcsschema.Attachment{ - Path: sm.HostPath, - Type_: attachmentType, - ReadOnly: readOnly, - }, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), - } - + var guestReq guestrequest.GuestRequest if sm.UVMPath != "" { - guestReq := guestrequest.GuestRequest{ + guestReq = guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, RequestType: requesttype.Add, } @@ -317,11 +310,34 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, atta Options: guestOptions, } } - SCSIModification.GuestRequest = guestReq } - if err := uvm.modify(ctx, SCSIModification); err != nil { - return nil, fmt.Errorf("failed to modify UVM with new SCSI mount: %s", err) + var diskType vm.SCSIDiskType + switch attachmentType { + case "VirtualDisk": + switch ext := filepath.Ext(sm.HostPath); ext { + case ".vhd": + diskType = vm.SCSIDiskTypeVHD1 + case ".vhdx": + diskType = vm.SCSIDiskTypeVHDX + default: + return nil, fmt.Errorf("unsupported extension for virtual disk: %s", ext) + } + case "PassThru": + diskType = vm.SCSIDiskTypePassThrough + default: + return nil, fmt.Errorf("unsupported SCSI disk type: %s", attachmentType) + } + + scsi, ok := uvm.vm.(vm.SCSIManager) + if !ok || !uvm.vm.Supported(vm.SCSI, vm.Add) { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping SCSI disk add") + } + if err := scsi.AddSCSIDisk(ctx, uint32(sm.Controller), uint32(sm.LUN), sm.HostPath, diskType, readOnly); err != nil { + return nil, errors.Wrap(err, 
"failed to add SCSI disk") + } + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return nil, errors.Wrap(err, "failed guest request to add SCSI disk") } return sm, nil } @@ -454,13 +470,11 @@ func (sm *SCSIMount) GobDecode(data []byte) error { // the uvm `vm`. If `sm` is read only then it is simply added to the `vm`. But if it is a // writeable mount(e.g a scratch layer) then a copy of it is made and that copy is added // to the `vm`. -func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error { +func (sm *SCSIMount) Clone(ctx context.Context, uvm *UtilityVM, cd *cloneData) error { var ( dstVhdPath string = sm.HostPath err error dir string - conStr string = fmt.Sprintf("%d", sm.Controller) - lunStr string = fmt.Sprintf("%d", sm.LUN) ) if !sm.readOnly { @@ -509,25 +523,26 @@ func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) er } } - if cd.doc.VirtualMachine.Devices.Scsi == nil { - cd.doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + scsiMgr, ok := cd.builder.(vm.SCSIManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping scsi operation") } - - if _, ok := cd.doc.VirtualMachine.Devices.Scsi[conStr]; !ok { - cd.doc.VirtualMachine.Devices.Scsi[conStr] = hcsschema.Scsi{ - Attachments: map[string]hcsschema.Attachment{}, - } + if err := scsiMgr.AddSCSIController(uint32(sm.Controller)); err != nil { + return err } - - cd.doc.VirtualMachine.Devices.Scsi[conStr].Attachments[lunStr] = hcsschema.Attachment{ - Path: dstVhdPath, - Type_: sm.attachmentType, + if err := scsiMgr.AddSCSIDisk( + ctx, + uint32(sm.Controller), + uint32(sm.LUN), + dstVhdPath, + vm.SCSIDiskTypeVHDX, + sm.readOnly, + ); err != nil { + return err } - clonedScsiMount := newSCSIMount(vm, dstVhdPath, sm.UVMPath, sm.attachmentType, 1, sm.Controller, sm.LUN, sm.readOnly) - - vm.scsiLocations[sm.Controller][sm.LUN] = clonedScsiMount - + clonedScsiMount := newSCSIMount(uvm, dstVhdPath, sm.UVMPath, sm.attachmentType, 1, 
sm.Controller, sm.LUN, sm.readOnly) + uvm.scsiLocations[sm.Controller][sm.LUN] = clonedScsiMount return nil } diff --git a/internal/uvm/start.go b/internal/uvm/start.go index ef76b39ae1..40cadbdc73 100644 --- a/internal/uvm/start.go +++ b/internal/uvm/start.go @@ -15,11 +15,11 @@ import ( "github.com/Microsoft/hcsshim/internal/gcs" "github.com/Microsoft/hcsshim/internal/guestrequest" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -119,7 +119,7 @@ func parseLogrus(vmid string) func(r io.Reader) { } // When using an external GCS connection it is necessary to send a ModifySettings request -// for HvSockt so that the GCS can setup some registry keys that are required for running +// for HvSocket so that the GCS can setup some registry keys that are required for running // containers inside the UVM. In non external GCS connection scenarios this is done by the // HCS immediately after the GCS connection is done. Since, we are using the external GCS // connection we should do that setup here after we connect with the GCS. 
@@ -128,24 +128,17 @@ func (uvm *UtilityVM) configureHvSocketForGCS(ctx context.Context) (err error) { if uvm.OS() != "windows" { return nil } - - hvsocketAddress := &hcsschema.HvSocketAddress{ - LocalAddress: uvm.runtimeID.String(), - ParentAddress: gcs.WindowsGcsHvHostID.String(), - } - - conSetupReq := &hcsschema.ModifySettingRequest{ - GuestRequest: guestrequest.GuestRequest{ - RequestType: requesttype.Update, - ResourceType: guestrequest.ResourceTypeHvSocket, - Settings: hvsocketAddress, + guestReq := guestrequest.GuestRequest{ + RequestType: requesttype.Update, + ResourceType: guestrequest.ResourceTypeHvSocket, + Settings: &hcsschema.HvSocketAddress{ + LocalAddress: uvm.vm.VmID(), + ParentAddress: gcs.WindowsGcsHvHostID.String(), }, } - - if err = uvm.modify(ctx, conSetupReq); err != nil { - return fmt.Errorf("failed to configure HVSOCK for external GCS: %s", err) + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return errors.Wrap(err, "failed to configure HVSOCK for external GCS") } - return nil } @@ -194,24 +187,21 @@ func (uvm *UtilityVM) Start(ctx context.Context) (err error) { }) } - err = uvm.hcsSystem.Start(ctx) + err = uvm.vm.Start(ctx) if err != nil { return err } defer func() { if err != nil { - _ = uvm.hcsSystem.Terminate(ctx) - _ = uvm.hcsSystem.Wait() + _ = uvm.vm.Stop(ctx) + _ = uvm.vm.Wait() } }() // Start waiting on the utility VM. uvm.exitCh = make(chan struct{}) go func() { - err := uvm.hcsSystem.Wait() - if err == nil { - err = uvm.hcsSystem.ExitError() - } + err := uvm.vm.Wait() uvm.exitErr = err close(uvm.exitCh) }() @@ -231,9 +221,11 @@ func (uvm *UtilityVM) Start(ctx context.Context) (err error) { } // Start the GCS protocol. 
gcc := &gcs.GuestConnectionConfig{ - Conn: conn, - Log: log.G(ctx).WithField(logfields.UVMID, uvm.id), - IoListen: gcs.HvsockIoListen(uvm.runtimeID), + Conn: conn, + Log: log.G(ctx).WithField(logfields.UVMID, uvm.id), + IoListen: func(port uint32) (net.Listener, error) { + return uvm.listenVsock(context.Background(), port) + }, } uvm.gc, err = gcc.Connect(ctx, !uvm.IsClone) if err != nil { @@ -244,16 +236,8 @@ func (uvm *UtilityVM) Start(ctx context.Context) (err error) { // initial setup required for external GCS connection if err = uvm.configureHvSocketForGCS(ctx); err != nil { - return fmt.Errorf("failed to do initial GCS setup: %s", err) - } - } else { - // Cache the guest connection properties. - properties, err := uvm.hcsSystem.Properties(ctx, schema1.PropertyTypeGuestConnection) - if err != nil { - return err + return errors.Wrap(err, "failed to do initial GCS setup") } - uvm.guestCaps = properties.GuestConnectionInfo.GuestDefinedCapabilities - uvm.protocol = properties.GuestConnectionInfo.ProtocolVersion } return nil diff --git a/internal/uvm/stats.go b/internal/uvm/stats.go index 73bb65da89..c101a05f14 100644 --- a/internal/uvm/stats.go +++ b/internal/uvm/stats.go @@ -2,154 +2,11 @@ package uvm import ( "context" - "strings" - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/go-winio/pkg/process" "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" ) -// checkProcess checks if the process identified by the given pid has a name -// matching `desiredProcessName`, and is running as a user with domain -// `desiredDomain` and user name `desiredUser`. If the process matches, it -// returns a handle to the process. If the process does not match, it returns -// 0. 
-func checkProcess(ctx context.Context, pid uint32, desiredProcessName string, desiredDomain string, desiredUser string) (p windows.Handle, err error) { - desiredProcessName = strings.ToUpper(desiredProcessName) - desiredDomain = strings.ToUpper(desiredDomain) - desiredUser = strings.ToUpper(desiredUser) - - p, err = windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION|windows.PROCESS_VM_READ, false, pid) - if err != nil { - return 0, err - } - defer func(openedProcess windows.Handle) { - // If we don't return this process handle, close it so it doesn't leak. - if p == 0 { - windows.Close(openedProcess) - } - }(p) - // Querying vmmem's image name as a win32 path returns ERROR_GEN_FAILURE - // for some reason, so we query it as an NT path instead. - name, err := process.QueryFullProcessImageName(p, process.ImageNameFormatNTPath) - if err != nil { - return 0, err - } - if strings.ToUpper(name) == desiredProcessName { - var t windows.Token - if err := windows.OpenProcessToken(p, windows.TOKEN_QUERY, &t); err != nil { - return 0, err - } - defer t.Close() - tUser, err := t.GetTokenUser() - if err != nil { - return 0, err - } - user, domain, _, err := tUser.User.Sid.LookupAccount("") - if err != nil { - return 0, err - } - log.G(ctx).WithFields(logrus.Fields{ - "name": name, - "domain": domain, - "user": user, - }).Debug("checking vmmem process identity") - if strings.ToUpper(domain) == desiredDomain && strings.ToUpper(user) == desiredUser { - return p, nil - } - } - return 0, nil -} - -// lookupVMMEM locates the vmmem process for a VM given the VM ID. It returns -// a handle to the vmmem process. The lookup is implemented by enumerating all -// processes on the system, and finding a process with full name "vmmem", -// running as "NT VIRTUAL MACHINE\". 
-func lookupVMMEM(ctx context.Context, vmID guid.GUID) (proc windows.Handle, err error) { - vmIDStr := strings.ToUpper(vmID.String()) - log.G(ctx).WithField("vmID", vmIDStr).Debug("looking up vmmem") - - pids, err := process.EnumProcesses() - if err != nil { - return 0, errors.Wrap(err, "failed to enumerate processes") - } - for _, pid := range pids { - p, err := checkProcess(ctx, pid, "vmmem", "NT VIRTUAL MACHINE", vmIDStr) - if err != nil { - // Checking the process could fail for a variety of reasons, such as - // the process exiting since we called EnumProcesses, or not having - // access to open the process (even as SYSTEM). In the case of an - // error, we just log and continue looking at the other processes. - log.G(ctx).WithField("pid", pid).Debug("failed to check process") - continue - } - if p != 0 { - log.G(ctx).WithField("pid", pid).Debug("found vmmem match") - return p, nil - } - } - return 0, errors.New("failed to find matching vmmem process") -} - -// getVMMEMProcess returns a handle to the vmmem process associated with this -// UVM. It only does the actual process lookup once, after which it caches the -// process handle in the UVM object. -func (uvm *UtilityVM) getVMMEMProcess(ctx context.Context) (windows.Handle, error) { - uvm.vmmemOnce.Do(func() { - uvm.vmmemProcess, uvm.vmmemErr = lookupVMMEM(ctx, uvm.runtimeID) - }) - return uvm.vmmemProcess, uvm.vmmemErr -} - // Stats returns various UVM statistics. 
func (uvm *UtilityVM) Stats(ctx context.Context) (*stats.VirtualMachineStatistics, error) { - s := &stats.VirtualMachineStatistics{} - props, err := uvm.hcsSystem.PropertiesV2(ctx, hcsschema.PTStatistics, hcsschema.PTMemory) - if err != nil { - return nil, err - } - s.Processor = &stats.VirtualMachineProcessorStatistics{} - s.Processor.TotalRuntimeNS = uint64(props.Statistics.Processor.TotalRuntime100ns * 100) - - s.Memory = &stats.VirtualMachineMemoryStatistics{} - if uvm.physicallyBacked { - // If the uvm is physically backed we set the working set to the total amount allocated - // to the UVM. AssignedMemory returns the number of 4KB pages. Will always be 4KB - // regardless of what the UVMs actual page size is so we don't need that information. - if props.Memory != nil { - s.Memory.WorkingSetBytes = props.Memory.VirtualMachineMemory.AssignedMemory * 4096 - } - } else { - // The HCS properties does not return sufficient information to calculate - // working set size for a VA-backed UVM. To work around this, we instead - // locate the vmmem process for the VM, and query that process's working set - // instead, which will be the working set for the VM. 
- vmmemProc, err := uvm.getVMMEMProcess(ctx) - if err != nil { - return nil, err - } - memCounters, err := process.GetProcessMemoryInfo(vmmemProc) - if err != nil { - return nil, err - } - s.Memory.WorkingSetBytes = uint64(memCounters.WorkingSetSize) - } - - if props.Memory != nil { - s.Memory.VirtualNodeCount = props.Memory.VirtualNodeCount - s.Memory.VmMemory = &stats.VirtualMachineMemory{} - s.Memory.VmMemory.AvailableMemory = props.Memory.VirtualMachineMemory.AvailableMemory - s.Memory.VmMemory.AvailableMemoryBuffer = props.Memory.VirtualMachineMemory.AvailableMemoryBuffer - s.Memory.VmMemory.ReservedMemory = props.Memory.VirtualMachineMemory.ReservedMemory - s.Memory.VmMemory.AssignedMemory = props.Memory.VirtualMachineMemory.AssignedMemory - s.Memory.VmMemory.SlpActive = props.Memory.VirtualMachineMemory.SlpActive - s.Memory.VmMemory.BalancingEnabled = props.Memory.VirtualMachineMemory.BalancingEnabled - s.Memory.VmMemory.DmOperationInProgress = props.Memory.VirtualMachineMemory.DmOperationInProgress - } - return s, nil + return uvm.vm.Stats(ctx) } diff --git a/internal/uvm/types.go b/internal/uvm/types.go index 24b632865c..b9ef407f4e 100644 --- a/internal/uvm/types.go +++ b/internal/uvm/types.go @@ -6,13 +6,11 @@ import ( "net" "sync" - "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/gcs" - "github.com/Microsoft/hcsshim/internal/hcs" "github.com/Microsoft/hcsshim/internal/hcs/schema1" "github.com/Microsoft/hcsshim/internal/hns" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" - "golang.org/x/sys/windows" + "github.com/Microsoft/hcsshim/internal/vm" ) // | WCOW | LCOW @@ -40,11 +38,11 @@ type namespaceInfo struct { // UtilityVM is the object used by clients representing a utility VM type UtilityVM struct { + builder vm.UVMBuilder // Object used to construct the Utility VM configuration document + vm vm.UVM // Underlying interface to the virtstack used to launch the UVM. 
id string // Identifier for the utility VM (user supplied or generated) - runtimeID guid.GUID // Hyper-V VM ID owner string // Owner for the utility VM (user supplied or generated) operatingSystem string // "windows" or "linux" - hcsSystem *hcs.System // The handle to the compute system gcListener net.Listener // The GCS connection listener gc *gcs.GuestConnection // The GCS connection processorCount int32 @@ -102,15 +100,6 @@ type UtilityVM struct { entropyListener net.Listener - // Handle to the vmmem process associated with this UVM. Used to look up - // memory metrics for the UVM. - vmmemProcess windows.Handle - // Tracks the error returned when looking up the vmmem process. - vmmemErr error - // We only need to look up the vmmem process once, then we keep a handle - // open. - vmmemOnce sync.Once - // mountCounter is the number of mounts that have been added to the UVM // This is used in generating a unique mount path inside the UVM for every mount. // Access to this variable should be done atomically. 
diff --git a/internal/uvm/update_uvm.go b/internal/uvm/update_uvm.go index 635c81ae81..cc5a95c228 100644 --- a/internal/uvm/update_uvm.go +++ b/internal/uvm/update_uvm.go @@ -4,12 +4,13 @@ import ( "context" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" specs "github.com/opencontainers/runtime-spec/specs-go" ) func (uvm *UtilityVM) UpdateConstraints(ctx context.Context, data interface{}, annotations map[string]string) error { var memoryLimitInBytes *uint64 - var processorLimits *hcsschema.ProcessorLimits + var processorLimits *vm.ProcessorLimits switch resources := data.(type) { case *specs.WindowsResources: @@ -51,6 +52,5 @@ func (uvm *UtilityVM) UpdateConstraints(ctx context.Context, data interface{}, a return err } } - return nil } diff --git a/internal/uvm/virtual_device.go b/internal/uvm/virtual_device.go index 679941997f..86fb82df13 100644 --- a/internal/uvm/virtual_device.go +++ b/internal/uvm/virtual_device.go @@ -6,9 +6,9 @@ import ( "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/guestrequest" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" ) const ( @@ -84,38 +84,31 @@ func (uvm *UtilityVM) AssignDevice(ctx context.Context, deviceID string) (*VPCID return existingVPCIDevice, nil } - targetDevice := hcsschema.VirtualPciDevice{ - Functions: []hcsschema.VirtualPciFunction{ - { - DeviceInstancePath: deviceID, - }, - }, + pci, ok := uvm.vm.(vm.PCIManager) + if !ok || !uvm.vm.Supported(vm.PCI, vm.Add) { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping pci device add") } - - request := &hcsschema.ModifySettingRequest{ - ResourcePath: fmt.Sprintf(resourcepaths.VirtualPCIResourceFormat, vmBusGUID), - RequestType: requesttype.Add, - Settings: targetDevice, + if err := 
pci.AddDevice(ctx, deviceID, vmBusGUID); err != nil { + return nil, errors.Wrap(err, "failed to add vpci device") } // WCOW (when supported) does not require a guest request as part of the // device assignment if uvm.operatingSystem != "windows" { - // for LCOW, we need to make sure that specific paths relating to the - // device exist so they are ready to be used by later - // work in openGCS - request.GuestRequest = guestrequest.GuestRequest{ + // For LCOW, we need to make sure that specific paths relating to the + // device exist so they are ready to be used by later work in openGCS + guestReq := guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeVPCIDevice, RequestType: requesttype.Add, Settings: guestrequest.LCOWMappedVPCIDevice{ VMBusGUID: vmBusGUID, }, } + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return nil, err + } } - if err := uvm.modify(ctx, request); err != nil { - return nil, err - } result := &VPCIDevice{ vm: uvm, VMBusGUID: vmBusGUID, @@ -141,10 +134,14 @@ func (uvm *UtilityVM) removeDevice(ctx context.Context, deviceInstanceID string) vpci.refCount-- if vpci.refCount == 0 { delete(uvm.vpciDevices, deviceInstanceID) - return uvm.modify(ctx, &hcsschema.ModifySettingRequest{ - ResourcePath: fmt.Sprintf(resourcepaths.VirtualPCIResourceFormat, vpci.VMBusGUID), - RequestType: requesttype.Remove, - }) + + pci, ok := uvm.vm.(vm.PCIManager) + if !ok || !uvm.vm.Supported(vm.PCI, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping pci device removal") + } + if err := pci.RemoveDevice(ctx, vpci.deviceInstanceID, vpci.VMBusGUID); err != nil { + return errors.Wrap(err, "failed to remove vpci device") + } } return nil } diff --git a/internal/uvm/vpmem.go b/internal/uvm/vpmem.go index 6d883220eb..892398e5cc 100644 --- a/internal/uvm/vpmem.go +++ b/internal/uvm/vpmem.go @@ -2,15 +2,14 @@ package uvm import ( "context" - "errors" "fmt" "os" "github.com/Microsoft/hcsshim/internal/guestrequest" -
"github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -84,18 +83,17 @@ func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string) (_ string, return "", err } - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Add, - Settings: hcsschema.VirtualPMemDevice{ - HostPath: hostPath, - ReadOnly: true, - ImageFormat: "Vhd1", - }, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), + vpmem, ok := uvm.vm.(vm.VPMemManager) + if !ok || !uvm.vm.Supported(vm.VPMem, vm.Add) { + return "", errors.Wrap(vm.ErrNotSupported, "stopping vpmem device add") + } + + if err := vpmem.AddVPMemDevice(ctx, deviceNumber, hostPath, true, vm.VPMemImageFormatVHD1); err != nil { + return "", errors.Wrap(err, "failed to add vpmem device") } uvmPath := fmt.Sprintf(lcowVPMEMLayerFmt, deviceNumber) - modification.GuestRequest = guestrequest.GuestRequest{ + guestReq := guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeVPMemDevice, RequestType: requesttype.Add, Settings: guestrequest.LCOWMappedVPMemDevice{ @@ -104,8 +102,8 @@ func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string) (_ string, }, } - if err := uvm.modify(ctx, modification); err != nil { - return "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err) + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return "", errors.Wrap(err, "failed guest request to add vpmem device") } uvm.vpmemDevices[deviceNumber] = &vpmemInfo{ @@ -137,22 +135,28 @@ func (uvm *UtilityVM) RemoveVPMEM(ctx context.Context, hostPath string) (err err device := uvm.vpmemDevices[deviceNumber] if device.refCount == 1 { - modification := &hcsschema.ModifySettingRequest{ + 
vpmem, ok := uvm.vm.(vm.VPMemManager) + if !ok || !uvm.vm.Supported(vm.VPMem, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping vpmem device removal") + } + + guestReq := guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeVPMemDevice, - RequestType: requesttype.Remove, - Settings: guestrequest.LCOWMappedVPMemDevice{ - DeviceNumber: deviceNumber, - MountPath: device.uvmPath, - }, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: device.uvmPath, }, } - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) + if err := uvm.GuestRequest(ctx, guestReq); err != nil { + return errors.Wrap(err, "failed to remove vpmem device from guest") } + + if err := vpmem.RemoveVPMemDevice(ctx, deviceNumber, hostPath); err != nil { + return errors.Wrap(err, "failed to remove vpmem device") + } + log.G(ctx).WithFields(logrus.Fields{ "hostPath": device.hostPath, "uvmPath": device.uvmPath, diff --git a/internal/uvm/vsmb.go b/internal/uvm/vsmb.go index f81dfe8a01..66f9b59b72 100644 --- a/internal/uvm/vsmb.go +++ b/internal/uvm/vsmb.go @@ -10,12 +10,12 @@ import ( "strconv" "unsafe" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/winapi" "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -34,7 +34,7 @@ type VSMBShare struct { name string allowedFiles []string guestPath string - options 
hcsschema.VirtualSmbShareOptions + options vm.VSMBOptions serialVersionID uint32 } @@ -48,9 +48,9 @@ func (vsmb *VSMBShare) Release(ctx context.Context) error { // DefaultVSMBOptions returns the default VSMB options. If readOnly is specified, // returns the default VSMB options for a readonly share. -func (uvm *UtilityVM) DefaultVSMBOptions(readOnly bool) *hcsschema.VirtualSmbShareOptions { - opts := &hcsschema.VirtualSmbShareOptions{ - NoDirectmap: uvm.DevicesPhysicallyBacked() || uvm.VSMBNoDirectMap(), +func (uvm *UtilityVM) DefaultVSMBOptions(readOnly bool) *vm.VSMBOptions { + opts := &vm.VSMBOptions{ + NoDirectMap: uvm.DevicesPhysicallyBacked() || uvm.VSMBNoDirectMap(), } if readOnly { opts.ShareRead = true @@ -61,7 +61,7 @@ func (uvm *UtilityVM) DefaultVSMBOptions(readOnly bool) *hcsschema.VirtualSmbSha return opts } -func (uvm *UtilityVM) SetSaveableVSMBOptions(opts *hcsschema.VirtualSmbShareOptions, readOnly bool) { +func (uvm *UtilityVM) SetSaveableVSMBOptions(opts *vm.VSMBOptions, readOnly bool) { if readOnly { opts.ShareRead = true opts.CacheIo = true @@ -79,7 +79,7 @@ func (uvm *UtilityVM) SetSaveableVSMBOptions(opts *hcsschema.VirtualSmbShareOpti } opts.NoLocks = true opts.PseudoDirnotify = true - opts.NoDirectmap = true + opts.NoDirectMap = true } // findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`. @@ -157,7 +157,7 @@ func forceNoDirectMap(path string) (bool, error) { // AddVSMB adds a VSMB share to a Windows utility VM. Each VSMB share is ref-counted and // only added if it isn't already. This is used for read-only layers, mapped directories // to a container, and for mapped pipes. 
-func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcsschema.VirtualSmbShareOptions) (*VSMBShare, error) { +func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *vm.VSMBOptions) (*VSMBShare, error) { if uvm.operatingSystem != "windows" { return nil, errNotSupported } @@ -190,7 +190,7 @@ func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcs return nil, err } else if force { log.G(ctx).WithField("path", hostPath).Info("Forcing NoDirectmap for VSMB mount") - options.NoDirectmap = true + options.NoDirectMap = true } var requestType = requesttype.Update @@ -225,18 +225,13 @@ func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcs "options": fmt.Sprintf("%+#v", options), "operation": requestType, }).Info("Modifying VSMB share") - modification := &hcsschema.ModifySettingRequest{ - RequestType: requestType, - Settings: hcsschema.VirtualSmbShare{ - Name: share.name, - Options: options, - Path: hostPath, - AllowedFiles: newAllowedFiles, - }, - ResourcePath: resourcepaths.VSMBShareResourcePath, + + vsmb, ok := uvm.vm.(vm.VSMBManager) + if !ok || !uvm.vm.Supported(vm.VSMB, vm.Add) { + return nil, errors.Wrap(vm.ErrNotSupported, "stopping vsmb share add") } - if err := uvm.modify(ctx, modification); err != nil { - return nil, err + if err := vsmb.AddVSMB(ctx, hostPath, share.name, newAllowedFiles, options); err != nil { + return nil, errors.Wrap(err, "failed to add vsmb share") } } @@ -278,15 +273,13 @@ func (uvm *UtilityVM) RemoveVSMB(ctx context.Context, hostPath string, readOnly return nil } - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - Settings: hcsschema.VirtualSmbShare{Name: share.name}, - ResourcePath: resourcepaths.VSMBShareResourcePath, + vsmb, ok := uvm.vm.(vm.VSMBManager) + if !ok || !uvm.vm.Supported(vm.VSMB, vm.Remove) { + return errors.Wrap(vm.ErrNotSupported, "stopping vsmb share removal") } - if err := uvm.modify(ctx, 
modification); err != nil { - return fmt.Errorf("failed to remove vsmb share %s from %s: %+v: %s", hostPath, uvm.id, modification, err) + if err := vsmb.RemoveVSMB(ctx, share.name); err != nil { + return errors.Wrapf(err, "failed to remove vsmb share %s from %s", hostPath, uvm.id) } - delete(m, shareKey) return nil } @@ -380,20 +373,21 @@ func (vsmb *VSMBShare) GobDecode(data []byte) error { return nil } -// Clone creates a clone of the VSMBShare `vsmb` and adds that clone to the uvm `vm`. To +// Clone creates a clone of the VSMBShare `vsmb` and adds that clone to the uvm `vm`. To // clone VSMB share we just need to add it into the config doc of that VM and increase the // vsmb counter. -func (vsmb *VSMBShare) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error { - cd.doc.VirtualMachine.Devices.VirtualSmb.Shares = append(cd.doc.VirtualMachine.Devices.VirtualSmb.Shares, hcsschema.VirtualSmbShare{ - Name: vsmb.name, - Path: vsmb.HostPath, - Options: &vsmb.options, - AllowedFiles: vsmb.allowedFiles, - }) - vm.vsmbCounter++ +func (vsmb *VSMBShare) Clone(ctx context.Context, uvm *UtilityVM, cd *cloneData) error { + vsmbMgr, ok := cd.builder.(vm.VSMBManager) + if !ok { + return errors.Wrap(vm.ErrNotSupported, "stopping vsmb share operation") + } + if err := vsmbMgr.AddVSMB(ctx, vsmb.HostPath, vsmb.name, vsmb.allowedFiles, &vsmb.options); err != nil { + return err + } + uvm.vsmbCounter++ clonedVSMB := &VSMBShare{ - vm: vm, + vm: uvm, HostPath: vsmb.HostPath, refCount: 1, name: vsmb.name, @@ -404,11 +398,10 @@ func (vsmb *VSMBShare) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) } if vsmb.options.RestrictFileAccess { - vm.vsmbFileShares[vsmb.HostPath] = clonedVSMB + uvm.vsmbFileShares[vsmb.HostPath] = clonedVSMB } else { - vm.vsmbDirShares[vsmb.HostPath] = clonedVSMB + uvm.vsmbDirShares[vsmb.HostPath] = clonedVSMB } - return nil } diff --git a/internal/uvm/wait.go b/internal/uvm/wait.go index 552ee5fad7..f8298f7a31 100644 --- a/internal/uvm/wait.go +++ 
b/internal/uvm/wait.go @@ -7,12 +7,11 @@ import ( // Wait waits synchronously for a utility VM to terminate. func (uvm *UtilityVM) Wait() error { - err := uvm.hcsSystem.Wait() + err := uvm.vm.Wait() logrus.WithField(logfields.UVMID, uvm.id).Debug("uvm exited, waiting for output processing to complete") if uvm.outputProcessingDone != nil { <-uvm.outputProcessingDone } - return err } diff --git a/internal/vm/builder.go b/internal/vm/builder.go index 288daad76a..240fc999ee 100644 --- a/internal/vm/builder.go +++ b/internal/vm/builder.go @@ -4,10 +4,15 @@ import ( "context" ) +// CreateOpt can be used to apply virtstack specific settings during creation time. +type CreateOpt func(ctx context.Context, uvmb UVMBuilder) error + type UVMBuilder interface { // Create will create the Utility VM in a paused/powered off state with whatever is present in the implementation // of the interfaces config at the time of the call. - Create(ctx context.Context) (UVM, error) + // + // `opts` can be used to set virtstack specific configurations for the Utility VM. + Create(ctx context.Context, opts []CreateOpt) (UVM, error) } type MemoryBackingType uint8 @@ -29,7 +34,7 @@ type MemoryConfig struct { // MemoryManager handles setting and managing memory configurations for the Utility VM. type MemoryManager interface { // SetMemoryLimit sets the amount of memory in megabytes that the Utility VM will be assigned. - SetMemoryLimit(memoryMB uint64) error + SetMemoryLimit(ctx context.Context, memoryMB uint64) error // SetMemoryConfig sets an array of different memory configuration options available. This includes things like the // type of memory to back the VM (virtual/physical). SetMemoryConfig(config *MemoryConfig) error @@ -37,10 +42,19 @@ type MemoryManager interface { SetMMIOConfig(lowGapMB uint64, highBaseMB uint64, highGapMB uint64) error } +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. 
+type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 + // Value describing the relative priority of this virtual machine compared to other virtual machines. + Weight uint64 +} + // ProcessorManager handles setting and managing processor configurations for the Utility VM. type ProcessorManager interface { // SetProcessorCount sets the number of virtual processors that will be assigned to the Utility VM. SetProcessorCount(count uint32) error + SetProcessorLimits(ctx context.Context, limits *ProcessorLimits) error } // SerialManager manages setting up serial consoles for the Utility VM. diff --git a/internal/vm/hcs/builder.go b/internal/vm/hcs/builder.go index 676e114bf6..4bc7f53db6 100644 --- a/internal/vm/hcs/builder.go +++ b/internal/vm/hcs/builder.go @@ -5,20 +5,24 @@ import ( "github.com/Microsoft/hcsshim/internal/hcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/vm" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var _ vm.UVMBuilder = &utilityVMBuilder{} type utilityVMBuilder struct { id string + owner string guestOS vm.GuestOS doc *hcsschema.ComputeSystem } -func NewUVMBuilder(id string, owner string, guestOS vm.GuestOS) (vm.UVMBuilder, error) { +func NewUVMBuilder(id, owner string, guestOS vm.GuestOS) (vm.UVMBuilder, error) { doc := &hcsschema.ComputeSystem{ Owner: owner, SchemaVersion: schemaversion.SchemaV21(), @@ -46,7 +50,17 @@ func NewUVMBuilder(id string, owner string, guestOS vm.GuestOS) (vm.UVMBuilder, switch guestOS { case vm.Windows: - doc.VirtualMachine.Devices.VirtualSmb = &hcsschema.VirtualSmb{} + doc.VirtualMachine.Devices.VirtualSmb = &hcsschema.VirtualSmb{ + DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere + } + 
doc.VirtualMachine.Chipset = &hcsschema.Chipset{ + Uefi: &hcsschema.Uefi{ + BootThis: &hcsschema.UefiBootEntry{ + DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`, + DeviceType: "VmbFs", + }, + }, + } case vm.Linux: doc.VirtualMachine.Devices.Plan9 = &hcsschema.Plan9{} default: @@ -55,12 +69,20 @@ func NewUVMBuilder(id string, owner string, guestOS vm.GuestOS) (vm.UVMBuilder, return &utilityVMBuilder{ id: id, + owner: owner, guestOS: guestOS, doc: doc, }, nil } -func (uvmb *utilityVMBuilder) Create(ctx context.Context) (_ vm.UVM, err error) { +func (uvmb *utilityVMBuilder) Create(ctx context.Context, opts []vm.CreateOpt) (_ vm.UVM, err error) { + // Apply any opts + for _, o := range opts { + if err := o(ctx, uvmb); err != nil { + return nil, errors.Wrap(err, "failed applying create options for Utility VM") + } + } + cs, err := hcs.CreateComputeSystem(ctx, uvmb.id, uvmb.doc) if err != nil { return nil, errors.Wrap(err, "failed to create hcs compute system") @@ -80,6 +102,7 @@ func (uvmb *utilityVMBuilder) Create(ctx context.Context) (_ vm.UVM, err error) uvm := &utilityVM{ id: uvmb.id, + owner: uvmb.owner, guestOS: uvmb.guestOS, cs: cs, backingType: backingType, @@ -90,5 +113,10 @@ func (uvmb *utilityVMBuilder) Create(ctx context.Context) (_ vm.UVM, err error) return nil, err } uvm.vmID = properties.RuntimeID + + log.G(ctx).WithFields(logrus.Fields{ + logfields.UVMID: uvm.id, + "runtime-id": uvm.vmID, + }).Debug("created utility VM") return uvm, nil } diff --git a/internal/vm/hcs/container.go b/internal/vm/hcs/container.go new file mode 100644 index 0000000000..30bc28fd87 --- /dev/null +++ b/internal/vm/hcs/container.go @@ -0,0 +1,27 @@ +package hcs + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/cow" + "github.com/Microsoft/hcsshim/internal/hcs" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" +) + +// These handle the case where we don't have a direct + +func (uvm *utilityVM) 
CreateContainer(ctx context.Context, config interface{}) (cow.Container, error) { + doc := hcsschema.ComputeSystem{ + HostingSystemId: uvm.id, + Owner: uvm.owner, + SchemaVersion: schemaversion.SchemaV21(), + ShouldTerminateOnLastHandleClosed: true, + HostedSystem: config, + } + return hcs.CreateComputeSystem(ctx, uvm.id, &doc) +} + +func (uvm *utilityVM) CreateProcess(ctx context.Context, config interface{}) (cow.Process, error) { + return uvm.cs.CreateProcess(ctx, config) +} diff --git a/internal/vm/hcs/hcs.go b/internal/vm/hcs/hcs.go index 1c7f337304..da0e8c2ef0 100644 --- a/internal/vm/hcs/hcs.go +++ b/internal/vm/hcs/hcs.go @@ -16,6 +16,7 @@ var _ vm.UVM = &utilityVM{} type utilityVM struct { id string + owner string guestOS vm.GuestOS cs *hcs.System backingType vm.MemoryBackingType @@ -29,6 +30,10 @@ func (uvm *utilityVM) ID() string { return uvm.id } +func (uvm *utilityVM) VmID() string { + return uvm.vmID.String() +} + func (uvm *utilityVM) Start(ctx context.Context) (err error) { if err := uvm.cs.Start(ctx); err != nil { return errors.Wrap(err, "failed to start utility VM") @@ -43,6 +48,14 @@ func (uvm *utilityVM) Stop(ctx context.Context) error { return nil } +func (uvm *utilityVM) Close() error { + _ = windows.Close(uvm.vmmemProcess) + if err := uvm.cs.Close(); err != nil { + return errors.Wrap(err, "failed to close Utility VM") + } + return nil +} + func (uvm *utilityVM) Pause(ctx context.Context) error { if err := uvm.cs.Pause(ctx); err != nil { return errors.Wrap(err, "failed to pause utility VM") diff --git a/internal/vm/hcs/memory.go b/internal/vm/hcs/memory.go index afb9932628..8b09f241c8 100644 --- a/internal/vm/hcs/memory.go +++ b/internal/vm/hcs/memory.go @@ -1,10 +1,14 @@ package hcs import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/vm" ) -func (uvmb *utilityVMBuilder) SetMemoryLimit(memoryMB uint64) error { 
+func (uvmb *utilityVMBuilder) SetMemoryLimit(ctx context.Context, memoryMB uint64) error { uvmb.doc.VirtualMachine.ComputeTopology.Memory.SizeInMB = memoryMB return nil } @@ -26,3 +30,11 @@ func (uvmb *utilityVMBuilder) SetMMIOConfig(lowGapMB uint64, highBaseMB uint64, memory.HighMMIOGapInMB = highGapMB return nil } + +func (uvm *utilityVM) SetMemoryLimit(ctx context.Context, memoryMB uint64) error { + req := &hcsschema.ModifySettingRequest{ + ResourcePath: resourcepaths.MemoryResourcePath, + Settings: memoryMB, + } + return uvm.cs.Modify(ctx, req) +} diff --git a/internal/vm/hcs/network.go b/internal/vm/hcs/network.go index 6610fffd39..680fbee2d3 100644 --- a/internal/vm/hcs/network.go +++ b/internal/vm/hcs/network.go @@ -7,6 +7,7 @@ import ( "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/vm" ) func (uvm *utilityVM) AddNIC(ctx context.Context, nicID, endpointID, macAddr string) error { @@ -32,3 +33,21 @@ func (uvm *utilityVM) RemoveNIC(ctx context.Context, nicID, endpointID, macAddr } return uvm.cs.Modify(ctx, request) } + +func (uvm *utilityVM) UpdateNIC(ctx context.Context, nicID string, nic *vm.NetworkAdapter) error { + moderationName := hcsschema.InterruptModerationName(*nic.IovSettings.InterruptModeration) + req := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Update, + ResourcePath: fmt.Sprintf(resourcepaths.NetworkResourceFormat, nicID), + Settings: hcsschema.NetworkAdapter{ + EndpointId: nic.EndpointId, + MacAddress: nic.MacAddress, + IovSettings: &hcsschema.IovSettings{ + OffloadWeight: nic.IovSettings.OffloadWeight, + QueuePairsRequested: nic.IovSettings.QueuePairsRequested, + InterruptModeration: &moderationName, + }, + }, + } + return uvm.cs.Modify(ctx, req) +} diff --git a/internal/vm/hcs/opts.go b/internal/vm/hcs/opts.go new file mode 100644 index 0000000000..0b23bd39a2 
--- /dev/null +++ b/internal/vm/hcs/opts.go @@ -0,0 +1,54 @@ +package hcs + +import ( + "context" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" +) + +// WithEnableCompartmentNamespace sets whether to enable namespacing the network compartment in the UVM +// for WCOW. Namespacing makes it so the compartment created for a container is essentially no longer +// aware or able to see any of the other compartments on the host (in this case the UVM). +func WithEnableCompartmentNamespace() vm.CreateOpt { + return func(ctx context.Context, uvmb vm.UVMBuilder) error { + builder, ok := uvmb.(*utilityVMBuilder) + if !ok { + return errors.New("object is not an hcs UVMBuilder") + } + // Here for a temporary workaround until the need for setting this regkey is no more. To protect + // against any undesired behavior (such as some general networking scenarios ceasing to function) + // with a recent change to fix SMB share access in the UVM, this registry key will be checked to + // enable the change in question inside GNS.dll. + builder.doc.VirtualMachine.RegistryChanges = &hcsschema.RegistryChanges{ + AddValues: []hcsschema.RegistryValue{ + { + Key: &hcsschema.RegistryKey{ + Hive: "System", + Name: "CurrentControlSet\\Services\\gns", + }, + Name: "EnableCompartmentNamespace", + DWordValue: 1, + Type_: "DWord", + }, + }, + } + return nil + } +} + +// WithCloneConfig sets the necessary options for a cloneable Utility VM. 
+func WithCloneConfig(templateID string) vm.CreateOpt { + return func(ctx context.Context, uvmb vm.UVMBuilder) error { + builder, ok := uvmb.(*utilityVMBuilder) + if !ok { + return errors.New("object is not an hcs UVMBuilder") + } + + builder.doc.VirtualMachine.RestoreState = &hcsschema.RestoreState{ + TemplateSystemId: templateID, + } + return nil + } +} diff --git a/internal/vm/hcs/pipe.go b/internal/vm/hcs/pipe.go new file mode 100644 index 0000000000..dd66594266 --- /dev/null +++ b/internal/vm/hcs/pipe.go @@ -0,0 +1,26 @@ +package hcs + +import ( + "context" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/requesttype" +) + +func (uvm *utilityVM) AddPipe(ctx context.Context, hostPath string) error { + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + ResourcePath: fmt.Sprintf(resourcepaths.MappedPipeResourceFormat, hostPath), + } + return uvm.cs.Modify(ctx, &modification) +} + +func (uvm *utilityVM) RemovePipe(ctx context.Context, hostPath string) error { + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + ResourcePath: fmt.Sprintf(resourcepaths.MappedPipeResourceFormat, hostPath), + } + return uvm.cs.Modify(ctx, &modification) +} diff --git a/internal/vm/hcs/processor.go b/internal/vm/hcs/processor.go index fcbe12954e..1a9800ea9a 100644 --- a/internal/vm/hcs/processor.go +++ b/internal/vm/hcs/processor.go @@ -1,6 +1,39 @@ package hcs +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vm" +) + func (uvmb *utilityVMBuilder) SetProcessorCount(count uint32) error { uvmb.doc.VirtualMachine.ComputeTopology.Processor.Count = int32(count) return nil } + +func (uvmb *utilityVMBuilder) SetProcessorLimits(ctx context.Context, limits 
*vm.ProcessorLimits) error { + uvmb.doc.VirtualMachine.ComputeTopology.Processor.Limit = int32(limits.Limit) + uvmb.doc.VirtualMachine.ComputeTopology.Processor.Weight = int32(limits.Weight) + return nil +} + +func (uvm *utilityVM) SetProcessorCount(count uint32) error { + return vm.ErrNotSupported +} + +func vmProcessorLimitsToHCS(limits *vm.ProcessorLimits) *hcsschema.ProcessorLimits { + return &hcsschema.ProcessorLimits{ + Limit: limits.Limit, + Weight: limits.Weight, + } +} + +func (uvm *utilityVM) SetProcessorLimits(ctx context.Context, limits *vm.ProcessorLimits) error { + req := &hcsschema.ModifySettingRequest{ + ResourcePath: resourcepaths.CPULimitsResourcePath, + Settings: vmProcessorLimitsToHCS(limits), + } + return uvm.cs.Modify(ctx, req) +} diff --git a/internal/vm/hcs/scsi.go b/internal/vm/hcs/scsi.go index 1fc2bba473..8977d4689a 100644 --- a/internal/vm/hcs/scsi.go +++ b/internal/vm/hcs/scsi.go @@ -14,10 +14,13 @@ import ( func (uvmb *utilityVMBuilder) AddSCSIController(id uint32) error { if uvmb.doc.VirtualMachine.Devices.Scsi == nil { - uvmb.doc.VirtualMachine.Devices.Scsi = make(map[string]hcsschema.Scsi, 1) + uvmb.doc.VirtualMachine.Devices.Scsi = make(map[string]hcsschema.Scsi) } - uvmb.doc.VirtualMachine.Devices.Scsi[strconv.Itoa(int(id))] = hcsschema.Scsi{ - Attachments: make(map[string]hcsschema.Attachment), + conStr := strconv.Itoa(int(id)) + if _, ok := uvmb.doc.VirtualMachine.Devices.Scsi[conStr]; !ok { + uvmb.doc.VirtualMachine.Devices.Scsi[conStr] = hcsschema.Scsi{ + Attachments: map[string]hcsschema.Attachment{}, + } } return nil } @@ -32,9 +35,13 @@ func (uvmb *utilityVMBuilder) AddSCSIDisk(ctx context.Context, controller uint32 return fmt.Errorf("no scsi controller with index %d found", controller) } + scsiType, err := getSCSIDiskTypeString(typ) + if err != nil { + return err + } ctrl.Attachments[strconv.Itoa(int(lun))] = hcsschema.Attachment{ Path: path, - Type_: string(typ), + Type_: scsiType, ReadOnly: readOnly, } diff --git 
a/internal/vm/hcs/storage.go b/internal/vm/hcs/storage.go index 9b2317a036..b63a292a72 100644 --- a/internal/vm/hcs/storage.go +++ b/internal/vm/hcs/storage.go @@ -1,7 +1,11 @@ package hcs +import hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + func (uvmb *utilityVMBuilder) SetStorageQos(iopsMaximum int64, bandwidthMaximum int64) error { - uvmb.doc.VirtualMachine.StorageQoS.BandwidthMaximum = int32(bandwidthMaximum) - uvmb.doc.VirtualMachine.StorageQoS.IopsMaximum = int32(iopsMaximum) + uvmb.doc.VirtualMachine.StorageQoS = &hcsschema.StorageQoS{ + BandwidthMaximum: int32(bandwidthMaximum), + IopsMaximum: int32(iopsMaximum), + } return nil } diff --git a/internal/vm/hcs/vmsocket.go b/internal/vm/hcs/vmsocket.go index 22e268858a..02d24580d1 100644 --- a/internal/vm/hcs/vmsocket.go +++ b/internal/vm/hcs/vmsocket.go @@ -2,10 +2,14 @@ package hcs import ( "context" + "fmt" "net" "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/requesttype" "github.com/Microsoft/hcsshim/internal/vm" "github.com/pkg/errors" ) @@ -29,6 +33,23 @@ func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocket } } +func (uvm *utilityVM) UpdateVMSocket(ctx context.Context, socketType vm.VMSocketType, sid string, serviceConfig *vm.HvSocketServiceConfig) error { + if socketType != vm.HvSocket { + return vm.ErrNotSupported + } + request := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Update, + ResourcePath: fmt.Sprintf(resourcepaths.HvSocketConfigResourceFormat, sid), + Settings: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: serviceConfig.BindSecurityDescriptor, + ConnectSecurityDescriptor: serviceConfig.ConnectSecurityDescriptor, + Disabled: serviceConfig.Disabled, + AllowWildcardBinds: serviceConfig.AllowWildcardBinds, + }, + } + return 
uvm.cs.Modify(ctx, request) +} + func (uvm *utilityVM) hvSocketListen(ctx context.Context, serviceID guid.GUID) (net.Listener, error) { return winio.ListenHvsock(&winio.HvsockAddr{ VMID: uvm.vmID, diff --git a/internal/vm/hcs/vsmb.go b/internal/vm/hcs/vsmb.go index b5cbade5b7..fa5ee579a8 100644 --- a/internal/vm/hcs/vsmb.go +++ b/internal/vm/hcs/vsmb.go @@ -10,17 +10,18 @@ import ( ) func (uvmb *utilityVMBuilder) AddVSMB(ctx context.Context, path string, name string, allowed []string, options *vm.VSMBOptions) error { - uvmb.doc.VirtualMachine.Devices.VirtualSmb = &hcsschema.VirtualSmb{ - DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere - Shares: []hcsschema.VirtualSmbShare{ - { - Name: name, - Path: path, - AllowedFiles: allowed, - Options: vmVSMBOptionsToHCS(options), - }, - }, + if uvmb.doc.VirtualMachine.Devices.VirtualSmb == nil { + uvmb.doc.VirtualMachine.Devices.VirtualSmb = &hcsschema.VirtualSmb{} } + + uvmb.doc.VirtualMachine.Devices.VirtualSmb.Shares = append(uvmb.doc.VirtualMachine.Devices.VirtualSmb.Shares, + hcsschema.VirtualSmbShare{ + Name: name, + Path: path, + AllowedFiles: allowed, + Options: vmVSMBOptionsToHCS(options), + }, + ) return nil } @@ -38,6 +39,9 @@ func vmVSMBOptionsToHCS(options *vm.VSMBOptions) *hcsschema.VirtualSmbShareOptio TakeBackupPrivilege: options.TakeBackupPrivilege, PseudoOplocks: options.PseudoOplocks, PseudoDirnotify: options.PseudoDirnotify, + NoLocks: options.NoLocks, + RestrictFileAccess: options.RestrictFileAccess, + SingleFileMapping: options.SingleFileMapping, } } diff --git a/internal/vm/hcs/windows.go b/internal/vm/hcs/windows.go index e4775b0528..9e1c3ee769 100644 --- a/internal/vm/hcs/windows.go +++ b/internal/vm/hcs/windows.go @@ -3,6 +3,7 @@ package hcs import ( "context" + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" ) @@ -10,3 +11,13 @@ func (uvmb *utilityVMBuilder) SetCPUGroup(ctx 
context.Context, id string) error uvmb.doc.VirtualMachine.ComputeTopology.Processor.CpuGroup = &hcsschema.CpuGroup{Id: id} return nil } + +func (uvm *utilityVM) SetCPUGroup(ctx context.Context, id string) error { + req := &hcsschema.ModifySettingRequest{ + ResourcePath: resourcepaths.CPUGroupResourcePath, + Settings: &hcsschema.CpuGroup{ + Id: id, + }, + } + return uvm.cs.Modify(ctx, req) +} diff --git a/internal/vm/remotevm/builder.go b/internal/vm/remotevm/builder.go index b289428044..1b717f19e0 100644 --- a/internal/vm/remotevm/builder.go +++ b/internal/vm/remotevm/builder.go @@ -9,6 +9,7 @@ import ( "github.com/Microsoft/hcsshim/internal/jobobject" "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/vmservice" "github.com/containerd/ttrpc" @@ -20,14 +21,15 @@ import ( var _ vm.UVMBuilder = &utilityVMBuilder{} type utilityVMBuilder struct { - id string - guestOS vm.GuestOS - job *jobobject.JobObject - config *vmservice.VMConfig - client vmservice.VMService + id, binpath, addr string + guestOS vm.GuestOS + ignoreSupported bool + job *jobobject.JobObject + config *vmservice.VMConfig + client vmservice.VMService } -func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS vm.GuestOS) (vm.UVMBuilder, error) { +func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS vm.GuestOS) (_ vm.UVMBuilder, err error) { var job *jobobject.JobObject if binPath != "" { log.G(ctx).WithFields(logrus.Fields{ @@ -38,11 +40,23 @@ func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS opts := &jobobject.Options{ Name: id, } - job, err := jobobject.Create(ctx, opts) + job, err = jobobject.Create(ctx, opts) if err != nil { return nil, errors.Wrap(err, "failed to create job object for remotevm process") } + if err := job.SetTerminateOnLastHandleClose(); err != nil { + return nil, 
errors.Wrap(err, "failed to set terminate on last handle closed for remotevm job object") + } + + // If no address passed, just generate a random one. + if addr == "" { + addr, err = randomUnixSockAddr() + if err != nil { + return nil, err + } + } + cmd := exec.Command(binPath, "--ttrpc", addr) p, err := cmd.StdoutPipe() if err != nil { @@ -57,10 +71,6 @@ func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS return nil, errors.Wrap(err, "failed to assign remotevm process to job") } - if err := job.SetTerminateOnLastHandleClose(); err != nil { - return nil, errors.Wrap(err, "failed to set terminate on last handle closed for remotevm job object") - } - // Wait for stdout to close. This is our signal that the server is successfully up and running. _, _ = io.Copy(ioutil.Discard, p) } @@ -88,22 +98,44 @@ func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS }, nil } -func (uvmb *utilityVMBuilder) Create(ctx context.Context) (vm.UVM, error) { - // Grab what capabilities the virtstack supports up front. - capabilities, err := uvmb.client.CapabilitiesVM(ctx, &ptypes.Empty{}) - if err != nil { - return nil, errors.Wrap(err, "failed to get virtstack capabilities from vmservice") +func (uvmb *utilityVMBuilder) Create(ctx context.Context, opts []vm.CreateOpt) (_ vm.UVM, err error) { + // Apply any opts + for _, o := range opts { + if err := o(ctx, uvmb); err != nil { + return nil, errors.Wrap(err, "failed applying create options for Utility VM") + } + } + + var capabilities *vmservice.CapabilitiesVMResponse + if !uvmb.ignoreSupported { + // Grab what capabilities the virtstack supports up front. 
+ capabilities, err = uvmb.client.CapabilitiesVM(ctx, &ptypes.Empty{}) + if err != nil { + return nil, errors.Wrap(err, "failed to get virtstack capabilities from vmservice") + } } - if _, err := uvmb.client.CreateVM(ctx, &vmservice.CreateVMRequest{Config: uvmb.config, LogID: uvmb.id}); err != nil { + _, err = uvmb.client.CreateVM(ctx, &vmservice.CreateVMRequest{Config: uvmb.config, LogID: uvmb.id}) + if err != nil { return nil, errors.Wrap(err, "failed to create remote VM") } - return &utilityVM{ - id: uvmb.id, - job: uvmb.job, - config: uvmb.config, - client: uvmb.client, - capabilities: capabilities, - }, nil + log.G(ctx).WithFields(logrus.Fields{ + logfields.UVMID: uvmb.id, + "vmservice-address": uvmb.addr, + "vmservice-binary-path": uvmb.binpath, + }).Debug("created utility VM") + + uvm := &utilityVM{ + id: uvmb.id, + job: uvmb.job, + waitBlock: make(chan struct{}), + ignoreSupported: uvmb.ignoreSupported, + config: uvmb.config, + client: uvmb.client, + capabilities: capabilities, + } + + go uvm.waitBackground() + return uvm, nil } diff --git a/internal/vm/remotevm/memory.go b/internal/vm/remotevm/memory.go index f0933cfc4e..da233334a5 100644 --- a/internal/vm/remotevm/memory.go +++ b/internal/vm/remotevm/memory.go @@ -1,11 +1,13 @@ package remotevm import ( + "context" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/vmservice" ) -func (uvmb *utilityVMBuilder) SetMemoryLimit(memoryMB uint64) error { +func (uvmb *utilityVMBuilder) SetMemoryLimit(ctx context.Context, memoryMB uint64) error { if uvmb.config.MemoryConfig == nil { uvmb.config.MemoryConfig = &vmservice.MemoryConfig{} } @@ -22,7 +24,7 @@ func (uvmb *utilityVMBuilder) SetMemoryConfig(config *vm.MemoryConfig) error { uvmb.config.MemoryConfig.ColdDiscardHint = config.ColdDiscardHint uvmb.config.MemoryConfig.DeferredCommit = config.DeferredCommit uvmb.config.MemoryConfig.HotHint = config.HotHint - return vm.ErrNotSupported + return nil } func (uvmb *utilityVMBuilder) 
SetMMIOConfig(lowGapMB uint64, highBaseMB uint64, highGapMB uint64) error { diff --git a/internal/vm/remotevm/network.go b/internal/vm/remotevm/network.go index ceddeb4752..50f356efe9 100644 --- a/internal/vm/remotevm/network.go +++ b/internal/vm/remotevm/network.go @@ -8,6 +8,7 @@ import ( "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/hcn" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/vmservice" "github.com/pkg/errors" ) @@ -115,6 +116,9 @@ func (uvm *utilityVM) RemoveNIC(ctx context.Context, nicID, endpointID, macAddr ); err != nil { return errors.Wrap(err, "failed to remove network adapter") } - return nil } + +func (uvm *utilityVM) UpdateNIC(ctx context.Context, nicID string, nic *vm.NetworkAdapter) error { + return vm.ErrNotSupported +} diff --git a/internal/vm/remotevm/opts.go b/internal/vm/remotevm/opts.go new file mode 100644 index 0000000000..672274f275 --- /dev/null +++ b/internal/vm/remotevm/opts.go @@ -0,0 +1,19 @@ +package remotevm + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/vm" + "github.com/pkg/errors" +) + +func WithIgnoreSupported() vm.CreateOpt { + return func(ctx context.Context, uvmb vm.UVMBuilder) error { + builder, ok := uvmb.(*utilityVMBuilder) + if !ok { + return errors.New("object is not a remotevm UVMBuilder") + } + builder.ignoreSupported = true + return nil + } +} diff --git a/internal/vm/remotevm/processor.go b/internal/vm/remotevm/processor.go index 9e1ca2264a..45acc65470 100644 --- a/internal/vm/remotevm/processor.go +++ b/internal/vm/remotevm/processor.go @@ -3,13 +3,18 @@ package remotevm import ( "context" + "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/vmservice" ) -func (uvmb *utilityVMBuilder) SetProcessorCount(ctx context.Context, count uint32) error { +func (uvmb *utilityVMBuilder) SetProcessorCount(count uint32) error { if uvmb.config.ProcessorConfig == nil { uvmb.config.ProcessorConfig = 
&vmservice.ProcessorConfig{} } uvmb.config.ProcessorConfig.ProcessorCount = count return nil } + +func (uvmb *utilityVMBuilder) SetProcessorLimits(ctx context.Context, limits *vm.ProcessorLimits) error { + return vm.ErrNotSupported +} diff --git a/internal/vm/remotevm/remotevm.go b/internal/vm/remotevm/remotevm.go index 733e33b49d..3518a8eff5 100644 --- a/internal/vm/remotevm/remotevm.go +++ b/internal/vm/remotevm/remotevm.go @@ -2,6 +2,7 @@ package remotevm import ( "context" + "sync" "github.com/Microsoft/hcsshim/internal/jobobject" "github.com/Microsoft/hcsshim/internal/vm" @@ -13,12 +14,15 @@ import ( var _ vm.UVM = &utilityVM{} type utilityVM struct { - id string - waitError error - job *jobobject.JobObject - config *vmservice.VMConfig - client vmservice.VMService - capabilities *vmservice.CapabilitiesVMResponse + id string + waitBlock chan struct{} + closedWaitOnce sync.Once + waitError error + ignoreSupported bool + job *jobobject.JobObject + config *vmservice.VMConfig + client vmservice.VMService + capabilities *vmservice.CapabilitiesVMResponse } var vmSupportedResourceToVMService = map[vm.Resource]vmservice.CapabilitiesVMResponse_Resource{ @@ -35,6 +39,10 @@ func (uvm *utilityVM) ID() string { return uvm.id } +func (uvm *utilityVM) VmID() string { + return "" +} + func (uvm *utilityVM) Start(ctx context.Context) error { // The expectation is the VM should be in a paused state after creation. 
if _, err := uvm.client.ResumeVM(ctx, &ptypes.Empty{}); err != nil { @@ -50,13 +58,25 @@ func (uvm *utilityVM) Stop(ctx context.Context) error { return nil } +func (uvm *utilityVM) Close() error { + err := uvm.job.Close() + uvm.closedWaitOnce.Do(func() { + close(uvm.waitBlock) + }) + return err +} + func (uvm *utilityVM) Wait() error { + <-uvm.waitBlock + return uvm.waitError +} + +func (uvm *utilityVM) waitBackground() { _, err := uvm.client.WaitVM(context.Background(), &ptypes.Empty{}) - if err != nil { + uvm.closedWaitOnce.Do(func() { uvm.waitError = err - return errors.Wrap(err, "failed to wait on remote VM") - } - return nil + close(uvm.waitBlock) + }) } func (uvm *utilityVM) Pause(ctx context.Context) error { @@ -78,6 +98,9 @@ func (uvm *utilityVM) Save(ctx context.Context) error { } func (uvm *utilityVM) Supported(resource vm.Resource, operation vm.ResourceOperation) bool { + if uvm.ignoreSupported { + return true + } var foundResource *vmservice.CapabilitiesVMResponse_SupportedResource for _, supportedResource := range uvm.capabilities.SupportedResources { if vmSupportedResourceToVMService[resource] == supportedResource.Resource { diff --git a/internal/vm/remotevm/vmsocket.go b/internal/vm/remotevm/vmsocket.go index 048c9e48bd..ee7f30cc34 100644 --- a/internal/vm/remotevm/vmsocket.go +++ b/internal/vm/remotevm/vmsocket.go @@ -12,24 +12,35 @@ import ( "github.com/pkg/errors" ) -func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocketType, connID interface{}) (_ net.Listener, err error) { +// Get a random unix socket address to use. The "randomness" equates to making a temp file to reserve a unique name +// and then deleting it shortly after, using its name as the socket address.
+func randomUnixSockAddr() (string, error) { // Make a temp file and delete to "reserve" a unique name for the unix socket f, err := ioutil.TempFile("", "") if err != nil { - return nil, errors.Wrap(err, "failed to create temp file for unix socket") + return "", errors.Wrap(err, "failed to create temp file for unix socket") } if err := f.Close(); err != nil { - return nil, errors.Wrap(err, "failed to close temp file") + return "", errors.Wrap(err, "failed to close temp file") } if err := os.Remove(f.Name()); err != nil { - return nil, errors.Wrap(err, "failed to delete temp file to free up name") + return "", errors.Wrap(err, "failed to delete temp file to free up name") } - l, err := net.Listen("unix", f.Name()) + return f.Name(), nil +} + +func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocketType, connID interface{}) (_ net.Listener, err error) { + addr, err := randomUnixSockAddr() if err != nil { - return nil, errors.Wrapf(err, "failed to listen on unix socket %q", f.Name()) + return nil, err + } + + l, err := net.Listen("unix", addr) + if err != nil { + return nil, errors.Wrapf(err, "failed to listen on unix socket %q", addr) } defer func() { @@ -44,7 +55,7 @@ func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocket if !ok { return nil, errors.New("parameter passed to hvsocketlisten is not a GUID") } - if err := uvm.hvSocketListen(ctx, serviceGUID.String(), f.Name()); err != nil { + if err := uvm.hvSocketListen(ctx, serviceGUID.String(), addr); err != nil { return nil, errors.Wrap(err, "failed to setup relay to hvsocket listener") } case vm.VSock: @@ -52,7 +63,7 @@ func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocket if !ok { return nil, errors.New("parameter passed to vsocklisten is not the right type") } - if err := uvm.vsockListen(ctx, port, f.Name()); err != nil { + if err := uvm.vsockListen(ctx, port, addr); err != nil { return nil, errors.Wrap(err, "failed to setup relay to 
vsock listener") } default: @@ -62,7 +73,11 @@ func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocket return l, nil } -func (uvm *utilityVM) hvSocketListen(ctx context.Context, serviceID string, listenerPath string) error { +func (uvm *utilityVM) UpdateVMSocket(ctx context.Context, socketType vm.VMSocketType, sid string, serviceConfig *vm.HvSocketServiceConfig) error { + return vm.ErrNotSupported +} + +func (uvm *utilityVM) hvSocketListen(ctx context.Context, serviceID, listenerPath string) error { if _, err := uvm.client.VMSocket(ctx, &vmservice.VMSocketRequest{ Type: vmservice.ModifyType_ADD, Config: &vmservice.VMSocketRequest_HvsocketList{ diff --git a/internal/vm/vm.go b/internal/vm/vm.go index 4e0b31ca5f..6450b54292 100644 --- a/internal/vm/vm.go +++ b/internal/vm/vm.go @@ -6,6 +6,7 @@ import ( "net" "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" ) var ( @@ -24,9 +25,12 @@ const ( // Start, and Stop and also several optional nested interfaces that can be used to determine what the virtual machine // supports and to configure these resources. type UVM interface { - // ID will return a string identifier for the Utility VM. + // ID will return a string identifier for the Utility VM. This will generally be user supplied. ID() string + // VmID returns the ID that the virtstack uses to identify the VM (if any). + VmID() string + // Start will power on the Utility VM and put it into a running state. This will boot the guest OS and start all of the // devices configured on the machine. Start(ctx context.Context) error @@ -34,6 +38,9 @@ type UVM interface { // Stop will shutdown the Utility VM and place it into a terminated state. Stop(ctx context.Context) error + // Close will free up any resources for the Utility VM. + Close() error + // Pause will place the Utility VM into a paused state. 
The guest OS will be halted and any devices will have be in a // a suspended state. Save can be used to snapshot the current state of the virtual machine, and Resume can be used to // place the virtual machine back into a running state. @@ -72,6 +79,7 @@ const ( VSMB PCI Plan9 + Pipe Memory Processor CPUGroup @@ -134,6 +142,39 @@ type VPMemManager interface { RemoveVPMemDevice(ctx context.Context, id uint32, path string) error } +type IovSettings = hcsschema.IovSettings +type NetworkAdapter = hcsschema.NetworkAdapter +type InterruptModerationName = hcsschema.InterruptModerationName +type InterruptModerationValue = hcsschema.InterruptModerationValue + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. +const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} + // NetworkManager manages adding and removing network adapters for a Utility VM. type NetworkManager interface { // AddNIC adds a network adapter to the Utility VM. `nicID` should be a string representation of a @@ -142,6 +183,8 @@ type NetworkManager interface { // RemoveNIC removes a network adapter from the Utility VM. `nicID` should be a string representation of a // Windows GUID. 
RemoveNIC(ctx context.Context, nicID string, endpointID string, macAddr string) error + // UpdateNIC updates a network adapter attached to the Utility VM. + UpdateNIC(ctx context.Context, nicID string, nic *NetworkAdapter) error } // PCIManager manages assiging pci devices to a Utility VM. This is Windows specific at the moment. @@ -161,12 +204,16 @@ const ( VSock ) +type HvSocketServiceConfig = hcsschema.HvSocketServiceConfig + // VMSocketManager manages configuration for a hypervisor socket transport. This includes sockets such as // HvSocket and Vsock. type VMSocketManager interface { // VMSocketListen will create the requested vmsocket type and listen on the address specified by `connID`. // For HvSocket the type expected is a GUID, for Vsock it's a port of type uint32. VMSocketListen(ctx context.Context, socketType VMSocketType, connID interface{}) (net.Listener, error) + // UpdateVMSocket updates settings on the underlying socket transport. + UpdateVMSocket(ctx context.Context, socketType VMSocketType, sid string, serviceConfig *HvSocketServiceConfig) error } // VSMBOptions @@ -178,7 +225,10 @@ type VSMBOptions struct { ShareRead bool TakeBackupPrivilege bool NoOplocks bool + SingleFileMapping bool + RestrictFileAccess bool PseudoDirnotify bool + NoLocks bool } // VSMBManager manages adding virtual smb shares to a Utility VM. @@ -196,3 +246,11 @@ type Plan9Manager interface { // RemovePlan9 removes a plan 9 share from a running Utility VM. RemovePlan9(ctx context.Context, name string, port int32) error } + +// PipeManager manages adding named pipes to a Utility VM. +type PipeManager interface { + // AddPipe adds a named pipe to a running Utility VM + AddPipe(ctx context.Context, path string) error + // RemovePipe removes a named pipe from a running Utility VM + RemovePipe(ctx context.Context, path string) error +}