From fd0cf7b895597b3e9af63895345da10d620a620a Mon Sep 17 00:00:00 2001 From: Kathryn Baldauf Date: Mon, 9 Aug 2021 17:39:52 -0700 Subject: [PATCH] Fix build break in functional tests Signed-off-by: Kathryn Baldauf --- test/functional/wcow_test.go | 5 +- .../github.com/Microsoft/hcsshim/go.mod | 1 + .../github.com/Microsoft/hcsshim/go.sum | 1 + .../hcsshim/internal/guestrequest/types.go | 10 +- .../hcsshim/internal/hcsoci/devices.go | 15 ++ .../hcsshim/internal/hcsoci/hcsdoc_wcow.go | 6 +- .../hcsshim/internal/hcsoci/resources_lcow.go | 2 +- .../hcsshim/internal/hcsoci/resources_wcow.go | 2 +- .../hcsshim/internal/layers/layers.go | 72 +++++-- .../hcsshim/internal/oci/annotations.go | 3 + .../Microsoft/hcsshim/internal/oci/uvm.go | 2 + .../hcsshim/internal/uvm/combine_layers.go | 24 ++- .../hcsshim/internal/uvm/create_lcow.go | 2 + .../hcsshim/internal/uvm/security_policy.go | 50 +++++ .../pkg/securitypolicy/securitypolicy.go | 38 ++++ .../securitypolicy/securitypolicyenforcer.go | 204 ++++++++++++++++++ test/vendor/modules.txt | 1 + 17 files changed, 409 insertions(+), 29 deletions(-) create mode 100644 test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go create mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicy.go create mode 100644 test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go diff --git a/test/functional/wcow_test.go b/test/functional/wcow_test.go index 174405c7bb..c620ea6cc0 100644 --- a/test/functional/wcow_test.go +++ b/test/functional/wcow_test.go @@ -384,13 +384,14 @@ func TestWCOWArgonShim(t *testing.T) { } }() + id := "argon" // This is a cheat but stops us re-writing exactly the same code just for test - argonShimLocalMountPath, err := layerspkg.MountContainerLayers(context.Background(), append(imageLayers, argonShimScratchDir), "", "", nil) + argonShimLocalMountPath, err := layerspkg.MountContainerLayers(context.Background(), id, append(imageLayers, 
argonShimScratchDir), "", "", nil) if err != nil { t.Fatal(err) } argonShimMounted = true - argonShim, err := hcsshim.CreateContainer("argon", &hcsshim.ContainerConfig{ + argonShim, err := hcsshim.CreateContainer(id, &hcsshim.ContainerConfig{ SystemType: "Container", Name: "argonShim", VolumePath: argonShimLocalMountPath, diff --git a/test/vendor/github.com/Microsoft/hcsshim/go.mod b/test/vendor/github.com/Microsoft/hcsshim/go.mod index b9db24af84..1556ffe635 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/go.mod +++ b/test/vendor/github.com/Microsoft/hcsshim/go.mod @@ -3,6 +3,7 @@ module github.com/Microsoft/hcsshim go 1.13 require ( + github.com/BurntSushi/toml v0.3.1 github.com/Microsoft/go-winio v0.4.17 github.com/containerd/cgroups v1.0.1 github.com/containerd/console v1.0.2 diff --git a/test/vendor/github.com/Microsoft/hcsshim/go.sum b/test/vendor/github.com/Microsoft/hcsshim/go.sum index e5443094d2..87d1893f05 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/go.sum +++ b/test/vendor/github.com/Microsoft/hcsshim/go.sum @@ -34,6 +34,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go index 
548a8b43fd..a03cf09aa3 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go @@ -16,7 +16,14 @@ import ( // since the container path is already the scratch path. For linux, the GCS unions // the specified layers and ScratchPath together, placing the resulting union // filesystem at ContainerRootPath. -type CombinedLayers struct { +type LCOWCombinedLayers struct { + ContainerID string `json:"ContainerID"` + ContainerRootPath string `json:"ContainerRootPath,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` + ScratchPath string `json:"ScratchPath,omitempty"` +} + +type WCOWCombinedLayers struct { ContainerRootPath string `json:"ContainerRootPath,omitempty"` Layers []hcsschema.Layer `json:"Layers,omitempty"` ScratchPath string `json:"ScratchPath,omitempty"` @@ -113,6 +120,7 @@ const ( ResourceTypeVPCIDevice ResourceType = "VPCIDevice" ResourceTypeContainerConstraints ResourceType = "ContainerConstraints" ResourceTypeHvSocket ResourceType = "HvSocket" + ResourceTypeSecurityPolicy ResourceType = "SecurityPolicy" ) // GuestRequest is for modify commands passed to the guest. 
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go index ede997bb7e..89dfe1a157 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/devices.go @@ -14,6 +14,7 @@ import ( "github.com/Microsoft/hcsshim/internal/oci" "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -49,6 +50,11 @@ func getDeviceUtilHostPath() string { return filepath.Join(filepath.Dir(os.Args[0]), deviceUtilExeName) } +func isDeviceExtensionsSupported() bool { + // device extensions support was added from 20348 onwards. + return osversion.Build() >= 20348 +} + // getDeviceExtensions is a helper function to read the files at `extensionPaths` and unmarshal the contents // into a `hcsshema.DeviceExtension` to be added to a container's hcs create document. 
func getDeviceExtensions(annotations map[string]string) (*hcsschema.ContainerDefinitionDevice, error) { @@ -56,6 +62,15 @@ func getDeviceExtensions(annotations map[string]string) (*hcsschema.ContainerDef if err != nil { return nil, err } + + if len(extensionPaths) == 0 { + return nil, nil + } + + if !isDeviceExtensionsSupported() { + return nil, fmt.Errorf("device extensions are not supported on this build (%d)", osversion.Build()) + } + results := &hcsschema.ContainerDefinitionDevice{ DeviceExtension: []hcsschema.DeviceExtension{}, } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go index 6b6a810ced..af9e6deb38 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go @@ -380,11 +380,7 @@ func createWindowsContainerDocument(ctx context.Context, coi *createOptionsInter if err != nil { return nil, nil, err } - // Do not add empty extensions. It causes container creation to - // fail with invalid config error. 
- if len(extensions.DeviceExtension) > 0 { - v2Container.AdditionalDeviceNamespace = extensions - } + v2Container.AdditionalDeviceNamespace = extensions return v1, v2Container, nil } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go index c024605f73..a6f6e78949 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go @@ -42,7 +42,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * containerRootInUVM := r.ContainerRootInUVM() if coi.Spec.Windows != nil && len(coi.Spec.Windows.LayerFolders) > 0 { log.G(ctx).Debug("hcsshim::allocateLinuxResources mounting storage") - rootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) + rootPath, err := layers.MountContainerLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) if err != nil { return errors.Wrap(err, "failed to mount container storage") } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go index ef0ae89c38..ef7c8c1373 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go @@ -53,7 +53,7 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) { log.G(ctx).Debug("hcsshim::allocateWindowsResources mounting storage") containerRootInUVM := r.ContainerRootInUVM() - containerRootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) + 
containerRootPath, err := layers.MountContainerLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) if err != nil { return errors.Wrap(err, "failed to mount container storage") } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go index 86ff5e53a2..6b69ce9bbe 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go @@ -10,6 +10,7 @@ import ( "path/filepath" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/ospath" "github.com/Microsoft/hcsshim/internal/uvm" @@ -75,7 +76,8 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error { // the host at `volumeMountPath`. // // TODO dcantah: Keep better track of the layers that are added, don't simply discard the SCSI, VSMB, etc. resource types gotten inside. -func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot, volumeMountPath string, uvm *uvmpkg.UtilityVM) (_ string, err error) { + +func MountContainerLayers(ctx context.Context, containerId string, layerFolders []string, guestRoot string, volumeMountPath string, uvm *uvmpkg.UtilityVM) (_ string, err error) { log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::mountContainerLayers") if uvm == nil { @@ -84,21 +86,56 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot, } path := layerFolders[len(layerFolders)-1] rest := layerFolders[:len(layerFolders)-1] - if err := wclayer.ActivateLayer(ctx, path); err != nil { - return "", err - } - defer func() { - if err != nil { - _ = wclayer.DeactivateLayer(ctx, path) + // Simple retry loop to handle some behavior on RS5. 
Loopback VHDs used to be mounted in a different manor on RS5 (ws2019) which led to some + // very odd cases where things would succeed when they shouldn't have, or we'd simply timeout if an operation took too long. Many + // parallel invocations of this code path and stressing the machine seem to bring out the issues, but all of the possible failure paths + // that bring about the errors we have observed aren't known. + // + // On 19h1+ this *shouldn't* be needed, but the logic is to break if everything succeeded so this is harmless and shouldn't need a version check. + var lErr error + for i := 0; i < 5; i++ { + lErr = func() (err error) { + if err := wclayer.ActivateLayer(ctx, path); err != nil { + return err + } + + defer func() { + if err != nil { + _ = wclayer.DeactivateLayer(ctx, path) + } + }() + + return wclayer.PrepareLayer(ctx, path, rest) + }() + + if lErr != nil { + // Common errors seen from the RS5 behavior mentioned above is ERROR_NOT_READY and ERROR_DEVICE_NOT_CONNECTED. The former occurs when HCS + // tries to grab the volume path of the disk but it doesn't succeed, usually because the disk isn't actually mounted. DEVICE_NOT_CONNECTED + // has been observed after launching multiple containers in parallel on a machine under high load. This has also been observed to be a trigger + // for ERROR_NOT_READY as well. + if hcserr, ok := lErr.(*hcserror.HcsError); ok { + if hcserr.Err == windows.ERROR_NOT_READY || hcserr.Err == windows.ERROR_DEVICE_NOT_CONNECTED { + continue + } + } + // This was a failure case outside of the commonly known error conditions, don't retry here. + return "", lErr } - }() - if err := wclayer.PrepareLayer(ctx, path, rest); err != nil { - return "", err + // No errors in layer setup, we can leave the loop + break } + // If we got unlucky and ran into one of the two errors mentioned five times in a row and left the loop, we need to check + // the loop error here and fail also. 
+ if lErr != nil { + return "", errors.Wrap(lErr, "layer retry loop failed") + } + + // If any of the below fails, we want to detach the filter and unmount the disk. defer func() { if err != nil { _ = wclayer.UnprepareLayer(ctx, path) + _ = wclayer.DeactivateLayer(ctx, path) } }() @@ -212,7 +249,7 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot, rootfs = containerScratchPathInUVM } else { rootfs = ospath.Join(uvm.OS(), guestRoot, uvmpkg.RootfsPath) - err = uvm.CombineLayersLCOW(ctx, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) + err = uvm.CombineLayersLCOW(ctx, containerId, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) } if err != nil { return "", err @@ -326,9 +363,16 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe // Always remove the combined layers as they are part of scsi/vsmb/vpmem // removals. - if err := uvm.RemoveCombinedLayers(ctx, containerRootPath); err != nil { - log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") - retError = err + if uvm.OS() == "windows" { + if err := uvm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil { + log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") + retError = err + } + } else { + if err := uvm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil { + log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") + retError = err + } } // Unload the SCSI scratch path diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go index eb011c656a..e8acdb6422 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/annotations.go @@ -210,4 +210,7 @@ const ( // AnnotationNcproxyContainerID indicates whether or not to use the hcsshim container ID // when setting up ncproxy 
and computeagent AnnotationNcproxyContainerID = "io.microsoft.network.ncproxy.containerid" + + // AnnotationSecurityPolicy is used to specify a security policy for opengcs to enforce + AnnotationSecurityPolicy = "io.microsoft.virtualmachine.lcow.securitypolicy" ) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go index 06715099f0..9ddae4b445 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go @@ -329,6 +329,8 @@ func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) ( lopts.BootFilesPath = parseAnnotationsString(s.Annotations, AnnotationBootFilesRootPath, lopts.BootFilesPath) lopts.CPUGroupID = parseAnnotationsString(s.Annotations, AnnotationCPUGroupID, lopts.CPUGroupID) lopts.NetworkConfigProxy = parseAnnotationsString(s.Annotations, AnnotationNetworkConfigProxy, lopts.NetworkConfigProxy) + lopts.SecurityPolicy = parseAnnotationsString(s.Annotations, AnnotationSecurityPolicy, lopts.SecurityPolicy) + handleAnnotationPreferredRootFSType(ctx, s.Annotations, lopts) handleAnnotationKernelDirectBoot(ctx, s.Annotations, lopts) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go index 74c0ac70e2..5971cc49dc 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/combine_layers.go @@ -20,7 +20,7 @@ func (uvm *UtilityVM) CombineLayersWCOW(ctx context.Context, layerPaths []hcssch GuestRequest: guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeCombinedLayers, RequestType: requesttype.Add, - Settings: guestrequest.CombinedLayers{ + Settings: guestrequest.WCOWCombinedLayers{ ContainerRootPath: containerRootPath, Layers: layerPaths, }, @@ -35,7 +35,7 @@ func (uvm *UtilityVM) 
CombineLayersWCOW(ctx context.Context, layerPaths []hcssch // // NOTE: `layerPaths`, `scrathPath`, and `rootfsPath` are paths from within the // UVM. -func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, layerPaths []string, scratchPath, rootfsPath string) error { +func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, containerId string, layerPaths []string, scratchPath, rootfsPath string) error { if uvm.operatingSystem != "linux" { return errNotSupported } @@ -48,7 +48,8 @@ func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, layerPaths []string GuestRequest: guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeCombinedLayers, RequestType: requesttype.Add, - Settings: guestrequest.CombinedLayers{ + Settings: guestrequest.LCOWCombinedLayers{ + ContainerID: containerId, ContainerRootPath: rootfsPath, Layers: layers, ScratchPath: scratchPath, @@ -61,12 +62,25 @@ func (uvm *UtilityVM) CombineLayersLCOW(ctx context.Context, layerPaths []string // RemoveCombinedLayers removes the previously combined layers at `rootfsPath`. // // NOTE: `rootfsPath` is the path from within the UVM. 
-func (uvm *UtilityVM) RemoveCombinedLayers(ctx context.Context, rootfsPath string) error { +func (uvm *UtilityVM) RemoveCombinedLayersWCOW(ctx context.Context, rootfsPath string) error { msr := &hcsschema.ModifySettingRequest{ GuestRequest: guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeCombinedLayers, RequestType: requesttype.Remove, - Settings: guestrequest.CombinedLayers{ + Settings: guestrequest.WCOWCombinedLayers{ + ContainerRootPath: rootfsPath, + }, + }, + } + return uvm.modify(ctx, msr) +} + +func (uvm *UtilityVM) RemoveCombinedLayersLCOW(ctx context.Context, rootfsPath string) error { + msr := &hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Remove, + Settings: guestrequest.LCOWCombinedLayers{ ContainerRootPath: rootfsPath, }, }, diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go index de18ca1c60..192d16a95a 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go @@ -75,6 +75,7 @@ type OptionsLCOW struct { PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD` EnableColdDiscardHint bool // Whether the HCS should use cold discard hints. 
Defaults to false VPCIEnabled bool // Whether the kernel should enable pci + SecurityPolicy string // Optional security policy } // defaultLCOWOSBootFilesPath returns the default path used to locate the LCOW @@ -120,6 +121,7 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { PreferredRootFSType: PreferredRootFSTypeInitRd, EnableColdDiscardHint: false, VPCIEnabled: false, + SecurityPolicy: "", } if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil { diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go new file mode 100644 index 0000000000..e826b6d679 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/security_policy.go @@ -0,0 +1,50 @@ +package uvm + +import ( + "context" + "errors" + "fmt" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/pkg/securitypolicy" +) + +var ( + ErrBadPolicy = errors.New("your policy looks suspicious or is badly formatted") +) + +// SetSecurityPolicy tells the gcs instance in the UVM what policy to apply. 
+// +// This has to happen before we start mounting things or generally changing +// the state of the UVM after is has been measured at startup +func (uvm *UtilityVM) SetSecurityPolicy(ctx context.Context, policy string) error { + if uvm.operatingSystem != "linux" { + return errNotSupported + } + + uvm.m.Lock() + defer uvm.m.Unlock() + + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: securitypolicy.EncodedSecurityPolicy{ + SecurityPolicy: policy, + }, + } + + modification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeSecurityPolicy, + RequestType: requesttype.Add, + Settings: securitypolicy.EncodedSecurityPolicy{ + SecurityPolicy: policy, + }, + } + + if err := uvm.modify(ctx, modification); err != nil { + return fmt.Errorf("uvm::Policy: failed to modify utility VM configuration: %s", err) + } + + return nil +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicy.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicy.go new file mode 100644 index 0000000000..988b63713d --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicy.go @@ -0,0 +1,38 @@ +package securitypolicy + +// SecurityPolicy is the user supplied security policy to enforce. +type SecurityPolicy struct { + // Flag that when set to true allows for all checks to pass. Currently used + // to run with security policy enforcement "running dark"; checks can be in + // place but the default policy that is created on startup has AllowAll set + // to true, thus making policy enforcement effectively "off" from a logical + // standpoint. Policy enforcement isn't actually off as the policy is "allow + // everything:. 
+ AllowAll bool `json:"allow_all"` + // One or more containers that are allowed to run + Containers []SecurityPolicyContainer `json:"containers"` +} + +// SecurityPolicyContainer contains information about a container that should be +// allowed to run. "Allowed to run" is a bit of misnomer. For example, we +// enforce that when an overlay file system is constructed that it must be a +// an ordering of layers (as seen through dm-verity root hashes of devices) +// that match a listing from Layers in one of any valid SecurityPolicyContainer +// entries. Once that overlay creation is allowed, the command could not match +// policy and running the command would be rejected. +type SecurityPolicyContainer struct { + // The command that we will allow the container to execute + Command string `json:"command"` + // An ordered list of dm-verity root hashes for each layer that makes up + // "a container". Containers are constructed as an overlay file system. The + // order that the layers are overlayed is important and needs to be enforced + // as part of policy. 
+ Layers []string `json:"layers"` +} + +// EncodedSecurityPolicy is a JSON representation of SecurityPolicy that has +// been base64 encoded for storage in an annotation embedded within another +// JSON configuration +type EncodedSecurityPolicy struct { + SecurityPolicy string `json:"SecurityPolicy,omitempty"` +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go b/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go new file mode 100644 index 0000000000..53484afd29 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/pkg/securitypolicy/securitypolicyenforcer.go @@ -0,0 +1,204 @@ +package securitypolicy + +import ( + "errors" + "fmt" + "sync" +) + +type SecurityPolicyEnforcer interface { + EnforcePmemMountPolicy(target string, deviceHash string) (err error) + EnforceOverlayMountPolicy(containerID string, layerPaths []string) (err error) +} + +func NewSecurityPolicyEnforcer(policy *SecurityPolicy) (SecurityPolicyEnforcer, error) { + if policy == nil { + return nil, errors.New("security policy can't be nil") + } + + if policy.AllowAll { + return &OpenDoorSecurityPolicyEnforcer{}, nil + } else { + return NewStandardSecurityPolicyEnforcer(policy) + } +} + +type StandardSecurityPolicyEnforcer struct { + // The user supplied security policy. + SecurityPolicy SecurityPolicy + // Devices and ContainerIndexToContainerIds are used to build up an + // understanding of the containers running with a UVM as they come up and + // map them back to a container definition from the user supplied + // SecurityPolicy + // + // Devices is a listing of dm-verity root hashes seen when mounting a device + // stored in a "per-container basis". As the UVM goes through its process of + // bringing up containers, we have to piece together information about what + // is going on. 
+ // + // At the time that devices are being mounted, we do not know a container + // that they will be used for; only that there is a device with a given root + // hash that being mounted. We check to make sure that the root hash for the + // devices is a root hash that exists for 1 or more layers in any container + // in the supplied SecurityPolicy. Each "seen" layer is recorded in devices + // as it is mounted. So for example, if a root hash mount is found for the + // device being mounted and the first layer of the first container then we + // record the root hash in Devices[0][0]. + // + // Later, when overlay filesystems created, we verify that the ordered layers + // for said overlay filesystem match one of the device orderings in Devices. + // When a match is found, the index in Devices is the same index in + // SecurityPolicy.Containers. Overlay filesystem creation is the first time we + // have a "container id" available to us. The container id identifies the + // container in question going forward. We record the mapping of Container + // index to container id so that when we have future operations like "run + // command" which come with a container id, we can find the corresponding + // container index and use that to look up the command in the appropriate + // SecurityPolicyContainer instance. + // + // As containers can have exactly the same base image and be "the same" at + // the time we are doing overlay, the ContainerIndexToContainerIds in an + // array of possible containers for a given container id. 
+ // + // implementation details are availanle in: + // - EnforcePmemMountPolicy + // - EnforceOverlayMountPolicy + // - NewStandardSecurityPolicyEnforcer + Devices [][]string + ContainerIndexToContainerIds map[int][]string + // Mutex to prevent concurrent access to fields + mutex *sync.Mutex +} + +var _ SecurityPolicyEnforcer = (*StandardSecurityPolicyEnforcer)(nil) + +func NewStandardSecurityPolicyEnforcer(policy *SecurityPolicy) (*StandardSecurityPolicyEnforcer, error) { + if policy == nil { + return nil, errors.New("security policy can't be nil") + } + + // create new StandardSecurityPolicyEnforcer and add the new SecurityPolicy + // to it + // fill out corresponding devices structure by creating a "same shapped" + // devices listing that corresponds to our container root hash lists + // the devices list will get filled out as layers are mounted + devices := make([][]string, len(policy.Containers)) + + for i, container := range policy.Containers { + devices[i] = make([]string, len(container.Layers)) + } + + return &StandardSecurityPolicyEnforcer{ + SecurityPolicy: *policy, + Devices: devices, + ContainerIndexToContainerIds: map[int][]string{}, + mutex: &sync.Mutex{}, + }, nil +} + +func (policyState *StandardSecurityPolicyEnforcer) EnforcePmemMountPolicy(target string, deviceHash string) (err error) { + policyState.mutex.Lock() + defer policyState.mutex.Unlock() + + if len(policyState.SecurityPolicy.Containers) < 1 { + return errors.New("policy doesn't allow mounting containers") + } + + if deviceHash == "" { + return errors.New("device is missing verity root hash.") + } + + found := false + + for i, container := range policyState.SecurityPolicy.Containers { + for ii, layer := range container.Layers { + if deviceHash == layer { + policyState.Devices[i][ii] = target + found = true + } + } + } + + if !found { + return fmt.Errorf("roothash %s for mount %s doesn't match policy", deviceHash, target) + } + + return nil +} + +func (policyState 
*StandardSecurityPolicyEnforcer) EnforceOverlayMountPolicy(containerID string, layerPaths []string) (err error) { + policyState.mutex.Lock() + defer policyState.mutex.Unlock() + + if len(policyState.SecurityPolicy.Containers) < 1 { + return errors.New("policy doesn't allow mounting containers") + } + + // find maximum number of containers that could share this overlay + maxPossibleContainerIdsForOverlay := 0 + for _, device_list := range policyState.Devices { + if equalForOverlay(layerPaths, device_list) { + maxPossibleContainerIdsForOverlay++ + } + } + + if maxPossibleContainerIdsForOverlay == 0 { + errmsg := fmt.Sprintf("layerPaths '%v' doesn't match any valid layer path: '%v'", layerPaths, policyState.Devices) + return errors.New(errmsg) + } + + for i, device_list := range policyState.Devices { + if equalForOverlay(layerPaths, device_list) { + existing := policyState.ContainerIndexToContainerIds[i] + if len(existing) < maxPossibleContainerIdsForOverlay { + policyState.ContainerIndexToContainerIds[i] = append(existing, containerID) + } else { + errmsg := fmt.Sprintf("layerPaths '%v' already used in maximum number of container overlays", layerPaths) + return errors.New(errmsg) + } + } + } + + return nil +} + +func equalForOverlay(a1 []string, a2 []string) bool { + // We've stored the layers from bottom to topl they are in layerPaths as + // top to bottom (the order a string gets concatenated for the unix mount + // command). W do our check with that in mind. 
+ if len(a1) == len(a2) { + top_index := len(a2) - 1 + for i, v := range a1 { + if v != a2[top_index-i] { + return false + } + } + } else { + return false + } + return true +} + +type OpenDoorSecurityPolicyEnforcer struct{} + +var _ SecurityPolicyEnforcer = (*OpenDoorSecurityPolicyEnforcer)(nil) + +func (p *OpenDoorSecurityPolicyEnforcer) EnforcePmemMountPolicy(target string, deviceHash string) (err error) { + return nil +} + +func (p *OpenDoorSecurityPolicyEnforcer) EnforceOverlayMountPolicy(containerID string, layerPaths []string) (err error) { + return nil +} + +type ClosedDoorSecurityPolicyEnforcer struct{} + +var _ SecurityPolicyEnforcer = (*ClosedDoorSecurityPolicyEnforcer)(nil) + +func (p *ClosedDoorSecurityPolicyEnforcer) EnforcePmemMountPolicy(target string, deviceHash string) (err error) { + return errors.New("mounting is denied by policy") +} + +func (p *ClosedDoorSecurityPolicyEnforcer) EnforceOverlayMountPolicy(containerID string, layerPaths []string) (err error) { + return errors.New("creating an overlay fs is denied by policy") +} diff --git a/test/vendor/modules.txt b/test/vendor/modules.txt index e8547042d3..4ae47aca36 100644 --- a/test/vendor/modules.txt +++ b/test/vendor/modules.txt @@ -66,6 +66,7 @@ github.com/Microsoft/hcsshim/osversion github.com/Microsoft/hcsshim/pkg/go-runhcs github.com/Microsoft/hcsshim/pkg/ociwclayer github.com/Microsoft/hcsshim/pkg/octtrpc +github.com/Microsoft/hcsshim/pkg/securitypolicy # github.com/blang/semver v3.5.1+incompatible github.com/blang/semver # github.com/containerd/cgroups v1.0.1