diff --git a/internal/guestrequest/types.go b/internal/guestrequest/types.go index bfe83eab44..5ac526102d 100644 --- a/internal/guestrequest/types.go +++ b/internal/guestrequest/types.go @@ -45,10 +45,18 @@ type LCOWMappedDirectory struct { ReadOnly bool `json:"ReadOnly,omitempty"` } +// LCOWMappedLayer is one of potentially multiple read-only layers mapped on a VPMem device +type LCOWMappedLayer struct { + DeviceOffsetInBytes uint64 `json:"DeviceOffsetInBytes,omitempty"` + DeviceSizeInBytes uint64 `json:"DeviceSizeInBytes,omitempty"` +} + // Read-only layers over VPMem type LCOWMappedVPMemDevice struct { DeviceNumber uint32 `json:"DeviceNumber,omitempty"` MountPath string `json:"MountPath,omitempty"` + // Mapping is ignored when MountPath is not empty + MappingInfo *LCOWMappedLayer `json:"MappingInfo,omitempty"` } type LCOWMappedVPCIDevice struct { diff --git a/internal/hcs/schema2/virtual_p_mem_mapping.go b/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 0000000000..9ef322f615 --- /dev/null +++ b/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/internal/layers/layers.go b/internal/layers/layers.go index b5ab18a7ca..35ae9a8a39 100644 --- a/internal/layers/layers.go +++ b/internal/layers/layers.go @@ -212,14 +212,14 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) if !uvm.DevicesPhysicallyBacked() { // We first try vPMEM and if it is full or the file is too large we // fall back to SCSI. 
- uvmPath, err = uvm.AddVPMEM(ctx, layerPath) + uvmPath, err = uvm.AddVPMem(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, "layerType": "vpmem", }).Debug("Added LCOW layer") return uvmPath, nil - } else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMEMLayerSize { + } else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMemLayerSize { return "", fmt.Errorf("failed to add VPMEM layer: %s", err) } } @@ -239,7 +239,7 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) func removeLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) error { // Assume it was added to vPMEM and fall back to SCSI - err := uvm.RemoveVPMEM(ctx, layerPath) + err := uvm.RemoveVPMem(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, diff --git a/internal/memory/pool.go b/internal/memory/pool.go new file mode 100644 index 0000000000..6381dfd887 --- /dev/null +++ b/internal/memory/pool.go @@ -0,0 +1,316 @@ +package memory + +import ( + "github.com/pkg/errors" +) + +const ( + minimumClassSize = MegaByte + maximumClassSize = 4 * GigaByte + memoryClassNumber = 7 +) + +var ( + ErrInvalidMemoryClass = errors.New("invalid memory class") + ErrEarlyMerge = errors.New("not all children have been freed") + ErrEmptyPoolOperation = errors.New("operation on empty pool") +) + +// GetMemoryClassType returns the minimum memory class type that can hold a device of +// a given size. The smallest class is 1MB and the largest one is 4GB with 2 bit offset +// intervals in between, for a total of 7 different classes. 
This function does not +// do a validity check +func GetMemoryClassType(s uint64) classType { + s = (s - 1) >> 20 + memCls := uint32(0) + for s > 0 { + s = s >> 2 + memCls++ + } + return classType(memCls) +} + +// GetMemoryClassSize returns size in bytes for a given memory class +func GetMemoryClassSize(memCls classType) (uint64, error) { + if memCls >= memoryClassNumber { + return 0, ErrInvalidMemoryClass + } + return minimumClassSize << (2 * memCls), nil +} + +// region represents a contiguous memory block +type region struct { + // parent region that has been split into 4 + parent *region + class classType + // offset represents offset in bytes + offset uint64 +} + +// memoryPool tracks free and busy (used) memory regions +type memoryPool struct { + free map[uint64]*region + busy map[uint64]*region +} + +// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c +// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory +// space. +// +// There are a total of 7 different region sizes that can be allocated, with the smallest +// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device). +// +// For efficiency and to reduce fragmentation an entire region is allocated when requested. +// When there's no available region of requested size, we try to allocate more memory for +// this particular size by splitting the next available larger region into smaller ones, e.g. +// if there's no region available for size class 0, we try splitting a region from class 1, +// then class 2 etc, until we are able to do so or hit the upper limit. 
+type PoolAllocator struct { + pools [memoryClassNumber]*memoryPool +} + +var _ MappedRegion = ®ion{} +var _ Allocator = &PoolAllocator{} + +func (r *region) Offset() uint64 { + return r.offset +} + +func (r *region) Size() uint64 { + sz, err := GetMemoryClassSize(r.class) + if err != nil { + panic(err) + } + return sz +} + +func (r *region) Type() classType { + return r.class +} + +func newEmptyMemoryPool() *memoryPool { + return &memoryPool{ + free: make(map[uint64]*region), + busy: make(map[uint64]*region), + } +} + +func NewPoolMemoryAllocator() PoolAllocator { + pa := PoolAllocator{} + p := newEmptyMemoryPool() + // by default we allocate a single region with maximum possible size (class type) + p.free[0] = ®ion{ + class: memoryClassNumber - 1, + offset: 0, + } + pa.pools[memoryClassNumber-1] = p + return pa +} + +// Allocate checks memory region pool for the given `size` and returns a free region with +// minimal offset, if none available tries expanding matched memory pool. +// +// Internally it's done via moving a region from free pool into a busy pool +func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { + memCls := GetMemoryClassType(size) + if memCls >= memoryClassNumber { + return nil, ErrInvalidMemoryClass + } + + // find region with the smallest offset + nextCls, nextOffset, err := pa.findNextOffset(memCls) + if err != nil { + return nil, err + } + + // this means that there are no more regions for the current class, try expanding + if nextCls != memCls { + if err := pa.split(memCls); err != nil { + if err == ErrInvalidMemoryClass { + return nil, ErrNotEnoughSpace + } + return nil, err + } + } + + if err := pa.markBusy(memCls, nextOffset); err != nil { + return nil, err + } + + // by this point memory pool for memCls should have been created, + // either prior or during split call + if r := pa.pools[memCls].busy[nextOffset]; r != nil { + return r, nil + } + + return nil, ErrNotEnoughSpace +} + +// Release marks a memory region of 
class `memCls` and offset `offset` as free and tries to merge smaller regions into +// a bigger one +func (pa *PoolAllocator) Release(reg MappedRegion) error { + mp := pa.pools[reg.Type()] + if mp == nil { + return ErrEmptyPoolOperation + } + + err := pa.markFree(reg.Type(), reg.Offset()) + if err != nil { + return err + } + + n := mp.free[reg.Offset()] + if n == nil { + return ErrNotAllocated + } + if err := pa.merge(n.parent); err != nil { + if err != ErrEarlyMerge { + return err + } + } + return nil +} + +// findNextOffset finds next region location for a given memCls +func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) { + for mc := memCls; mc < memoryClassNumber; mc++ { + pi := pa.pools[mc] + if pi == nil || len(pi.free) == 0 { + continue + } + + target := maximumClassSize + for offset := range pi.free { + if offset < target { + target = offset + } + } + return mc, target, nil + } + return 0, 0, ErrNotEnoughSpace +} + +// split tries to recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit +func (pa *PoolAllocator) split(clsType classType) error { + nextClsType := clsType + 1 + if nextClsType >= memoryClassNumber { + return ErrInvalidMemoryClass + } + + nextPool := pa.pools[nextClsType] + if nextPool == nil { + nextPool = newEmptyMemoryPool() + pa.pools[nextClsType] = nextPool + } + + cls, offset, err := pa.findNextOffset(nextClsType) + if err != nil { + return err + } + // not enough memory in the next class, try to recursively expand + if cls != nextClsType { + if err := pa.split(nextClsType); err != nil { + return err + } + } + + if err := pa.markBusy(nextClsType, offset); err != nil { + return err + } + + // memCls validity has been checked already, we can ignore the error + clsSize, _ := GetMemoryClassSize(clsType) + + nextReg := nextPool.busy[offset] + if nextReg == nil { + return ErrNotAllocated + } + + // expand memCls + cp := pa.pools[clsType] + if cp == nil { + cp = 
newEmptyMemoryPool() + pa.pools[clsType] = cp + } + // create 4 smaller regions + for i := uint64(0); i < 4; i++ { + offset := nextReg.offset + i*clsSize + reg := ®ion{ + parent: nextReg, + class: clsType, + offset: offset, + } + cp.free[offset] = reg + } + return nil +} + +func (pa *PoolAllocator) merge(parent *region) error { + // nothing to merge + if parent == nil { + return nil + } + + childCls := parent.class - 1 + childPool := pa.pools[childCls] + // no child nodes to merge, try to merge parent + if childPool == nil { + return pa.merge(parent.parent) + } + + childSize, err := GetMemoryClassSize(childCls) + if err != nil { + return err + } + + // check if all the child nodes are free + var children []*region + for i := uint64(0); i < 4; i++ { + child, free := childPool.free[parent.offset+i*childSize] + if !free { + return ErrEarlyMerge + } + children = append(children, child) + } + + // at this point all the child nodes will be free and we can merge + for _, child := range children { + delete(childPool.free, child.offset) + } + + if err := pa.markFree(parent.class, parent.offset); err != nil { + return err + } + + return pa.merge(parent.parent) +} + +// markFree internally moves a region with `offset` from busy to free map +func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.busy[offset]; exists { + clsPool.free[offset] = reg + delete(clsPool.busy, offset) + return nil + } + return ErrNotAllocated +} + +// markBusy internally moves a region with `offset` from free to busy map +func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.free[offset]; exists { + clsPool.busy[offset] = reg + delete(clsPool.free, offset) + return nil + } + return ErrNotAllocated +} diff --git 
a/internal/memory/pool_test.go b/internal/memory/pool_test.go new file mode 100644 index 0000000000..4098ae5106 --- /dev/null +++ b/internal/memory/pool_test.go @@ -0,0 +1,253 @@ +package memory + +import ( + "testing" +) + +// helper function to test and validate minimal allocation scenario +func testAllocate(t *testing.T, ma *PoolAllocator, sz uint64) { + _, err := ma.Allocate(sz) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if len(ma.pools[0].busy) != 1 { + t.Fatal("memory slot wasn't marked as busy") + } +} + +func Test_MemAlloc_findNextOffset(t *testing.T) { + ma := NewPoolMemoryAllocator() + cls, offset, err := ma.findNextOffset(0) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if cls != memoryClassNumber-1 { + t.Fatalf("expected class=%d, got %d", memoryClassNumber-1, cls) + } + if offset != 0 { + t.Fatalf("expected offset=%d, got %d", 0, offset) + } +} + +func Test_MemAlloc_allocate_without_expand(t *testing.T) { + ma := &PoolAllocator{} + ma.pools[0] = newEmptyMemoryPool() + ma.pools[0].free[0] = ®ion{ + class: 0, + offset: 0, + } + + testAllocate(t, ma, MegaByte) +} + +func Test_MemAlloc_allocate_not_enough_space(t *testing.T) { + ma := &PoolAllocator{} + + _, err := ma.Allocate(MegaByte) + if err == nil { + t.Fatal("expected error, got nil") + } + if err != ErrNotEnoughSpace { + t.Fatalf("expected error=%s, got error=%s", ErrNotEnoughSpace, err) + } +} + +func Test_MemAlloc_expand(t *testing.T) { + pa := &PoolAllocator{} + pa.pools[1] = newEmptyMemoryPool() + pa.pools[1].free[0] = ®ion{ + class: 1, + offset: 0, + } + + err := pa.split(0) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if _, o, err := pa.findNextOffset(1); err == nil { + t.Fatalf("no free offset should be found for class 1, got offset=%d", o) + } + + poolCls0 := pa.pools[0] + for i := 0; i < 4; i++ { + offset := uint64(i) * MegaByte + _, ok := poolCls0.free[offset] + if !ok { + t.Fatalf("did not find region with offset=%d", offset) + 
} + delete(poolCls0.free, offset) + } + + if len(poolCls0.free) > 0 { + t.Fatalf("extra memory regions: %v", poolCls0.free) + } +} + +func Test_MemAlloc_allocate_automatically_expands(t *testing.T) { + pa := &PoolAllocator{} + pa.pools[2] = newEmptyMemoryPool() + pa.pools[2].free[MegaByte] = ®ion{ + class: 2, + offset: MegaByte, + } + + testAllocate(t, pa, MegaByte) + + if pa.pools[1] == nil { + t.Fatalf("memory not extended for class type 1") + } + if len(pa.pools[2].free) > 0 { + t.Fatalf("expected no free regions for class type 2, got: %v", pa.pools[2].free) + } +} + +func Test_MemAlloc_alloc_and_release(t *testing.T) { + pa := &PoolAllocator{} + pa.pools[0] = newEmptyMemoryPool() + r := ®ion{ + class: 0, + offset: 0, + } + pa.pools[0].free[0] = r + + testAllocate(t, pa, MegaByte) + + err := pa.Release(r) + if err != nil { + t.Fatalf("error releasing resources: %s", err) + } + if len(pa.pools[0].busy) != 0 { + t.Fatalf("resources not marked as free: %v", pa.pools[0].busy) + } + if len(pa.pools[0].free) == 0 { + t.Fatal("resource not assigned back to the free pool") + } +} + +func Test_MemAlloc_alloc_invalid_larger_than_max(t *testing.T) { + pa := &PoolAllocator{} + + _, err := pa.Allocate(maximumClassSize + 1) + if err == nil { + t.Fatal("no error returned") + } + if err != ErrInvalidMemoryClass { + t.Fatalf("expected error=%s, got error=%s", ErrInvalidMemoryClass, err) + } +} + +func Test_MemAlloc_release_invalid_offset(t *testing.T) { + pa := &PoolAllocator{} + pa.pools[0] = newEmptyMemoryPool() + r := ®ion{ + class: 0, + offset: 0, + } + pa.pools[0].free[0] = r + + testAllocate(t, pa, MegaByte) + + // change the actual offset + r.offset = MegaByte + err := pa.Release(r) + if err == nil { + t.Fatal("no error returned") + } + if err != ErrNotAllocated { + t.Fatalf("wrong error returned: %s", err) + } +} + +func Test_MemAlloc_Max_Out(t *testing.T) { + ma := NewPoolMemoryAllocator() + for i := 0; i < 4096; i++ { + _, err := ma.Allocate(MegaByte) + if err != nil { 
+ t.Fatalf("unexpected error during memory allocation: %s", err) + } + } + if len(ma.pools[0].busy) != 4096 { + t.Fatalf("expected 4096 busy blocks of class 0, got %d instead", len(ma.pools[0].busy)) + } + for i := 0; i < 4096; i++ { + offset := uint64(i) * MegaByte + if _, ok := ma.pools[0].busy[offset]; !ok { + t.Fatalf("expected to find offset %d", offset) + } + } +} + +func Test_GetMemoryClass(t *testing.T) { + type config struct { + name string + size uint64 + expected classType + } + + testCases := []config{ + { + name: "Size_1MB_Class_0", + size: MegaByte, + expected: 0, + }, + { + name: "Size_6MB_Class_2", + size: 6 * MegaByte, + expected: 2, + }, + { + name: "Size_2GB_Class_6", + size: 2 * GigaByte, + expected: 6, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := GetMemoryClassType(tc.size) + if c != tc.expected { + t.Fatalf("expected classType for size: %d is %d, got %d instead", tc.size, tc.expected, c) + } + }) + } +} + +func Test_GetMemoryClassSize(t *testing.T) { + type config struct { + name string + clsType classType + expected uint64 + err error + } + + testCases := []config{ + { + name: "Class_0_Size_1MB", + clsType: 0, + expected: minimumClassSize, + }, + { + name: "Class_8_Size_4GB", + clsType: 6, + expected: maximumClassSize, + }, + { + name: "Class_7_ErrorInvalidMemoryClass", + clsType: 7, + err: ErrInvalidMemoryClass, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s, err := GetMemoryClassSize(tc.clsType) + if err != tc.err { + t.Fatalf("expected error to be %s, got %s instead", tc.err, err) + } + if s != tc.expected { + t.Fatalf("expected size to be %d, got %d instead", tc.expected, s) + } + }) + } +} diff --git a/internal/memory/types.go b/internal/memory/types.go new file mode 100644 index 0000000000..7cf4368a95 --- /dev/null +++ b/internal/memory/types.go @@ -0,0 +1,28 @@ +package memory + +import "github.com/pkg/errors" + +type classType uint32 + +const ( + MegaByte 
= uint64(1024 * 1024) + GigaByte = 1024 * MegaByte +) + +var ( + ErrNotEnoughSpace = errors.New("not enough space") + ErrNotAllocated = errors.New("no memory allocated at the given offset") +) + +// MappedRegion represents a memory block with an offset +type MappedRegion interface { + Offset() uint64 + Size() uint64 + Type() classType +} + +// Allocator is an interface for memory allocation +type Allocator interface { + Allocate(uint64) (MappedRegion, error) + Release(MappedRegion) error +} diff --git a/internal/oci/uvm.go b/internal/oci/uvm.go index db3f429253..e821837ed0 100644 --- a/internal/oci/uvm.go +++ b/internal/oci/uvm.go @@ -134,6 +134,7 @@ const ( annotationBootFilesRootPath = "io.microsoft.virtualmachine.lcow.bootfilesrootpath" annotationKernelDirectBoot = "io.microsoft.virtualmachine.lcow.kerneldirectboot" annotationVPCIEnabled = "io.microsoft.virtualmachine.lcow.vpcienabled" + annotationVPMemNoMultiMapping = "io.microsoft.virtualmachine.lcow.vpmem.nomultimapping" annotationStorageQoSBandwidthMaximum = "io.microsoft.virtualmachine.storageqos.bandwidthmaximum" annotationStorageQoSIopsMaximum = "io.microsoft.virtualmachine.storageqos.iopsmaximum" annotationFullyPhysicallyBacked = "io.microsoft.virtualmachine.fullyphysicallybacked" @@ -456,6 +457,7 @@ func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) ( lopts.ProcessorWeight = ParseAnnotationsCPUWeight(ctx, s, annotationProcessorWeight, lopts.ProcessorWeight) lopts.VPMemDeviceCount = parseAnnotationsUint32(ctx, s.Annotations, annotationVPMemCount, lopts.VPMemDeviceCount) lopts.VPMemSizeBytes = parseAnnotationsUint64(ctx, s.Annotations, annotationVPMemSize, lopts.VPMemSizeBytes) + lopts.VPMemNoMultiMapping = parseAnnotationsBool(ctx, s.Annotations, annotationVPMemNoMultiMapping, lopts.VPMemNoMultiMapping) lopts.StorageQoSBandwidthMaximum = ParseAnnotationsStorageBps(ctx, s, annotationStorageQoSBandwidthMaximum, lopts.StorageQoSBandwidthMaximum) lopts.StorageQoSIopsMaximum = 
ParseAnnotationsStorageIops(ctx, s, annotationStorageQoSIopsMaximum, lopts.StorageQoSIopsMaximum) lopts.VPCIEnabled = parseAnnotationsBool(ctx, s.Annotations, annotationVPCIEnabled, lopts.VPCIEnabled) diff --git a/internal/schema2/virtual_p_mem_mapping.go b/internal/schema2/virtual_p_mem_mapping.go new file mode 100644 index 0000000000..9ef322f615 --- /dev/null +++ b/internal/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/internal/uvm/create_lcow.go b/internal/uvm/create_lcow.go index 92c7e8d478..de18ca1c60 100644 --- a/internal/uvm/create_lcow.go +++ b/internal/uvm/create_lcow.go @@ -14,6 +14,10 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" + "github.com/Microsoft/hcsshim/internal/gcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" @@ -22,9 +26,6 @@ import ( "github.com/Microsoft/hcsshim/internal/processorinfo" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) type PreferredRootFSType int @@ -70,6 +71,7 @@ type OptionsLCOW struct { OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken. 
VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. + VPMemNoMultiMapping bool // Disables LCOW layer multi mapping PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD` EnableColdDiscardHint bool // Whether the HCS should use cold discard hints. Defaults to false VPCIEnabled bool // Whether the kernel should enable pci @@ -114,6 +116,7 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { OutputHandler: parseLogrus(id), VPMemDeviceCount: DefaultVPMEMCount, VPMemSizeBytes: DefaultVPMemSizeBytes, + VPMemNoMultiMapping: osversion.Get().Build < osversion.V19H1, PreferredRootFSType: PreferredRootFSTypeInitRd, EnableColdDiscardHint: false, VPCIEnabled: false, @@ -170,6 +173,7 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error physicallyBacked: !opts.AllowOvercommit, devicesPhysicallyBacked: opts.FullyPhysicallyBacked, createOpts: opts, + vpmemMultiMapping: !opts.VPMemNoMultiMapping, } defer func() { @@ -291,11 +295,35 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error ImageFormat: imageFormat, }, } - // Add to our internal structure - uvm.vpmemDevices[0] = &vpmemInfo{ - hostPath: opts.RootFSFile, - uvmPath: "/", - refCount: 1, + if uvm.vpmemMultiMapping { + pmem := newPackedVPMemDevice() + pmem.maxMappedDeviceCount = 1 + + st, err := os.Stat(rootfsFullPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) + } + devSize := pageAlign(uint64(st.Size())) + memReg, err := pmem.Allocate(devSize) + if err != nil { + return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + } + defer func() { + if err != nil { + if err = pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debug("failed to release memory region") + } + } + }() + + dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, 
memReg) + if err := pmem.mapVHDLayer(ctx, dev); err != nil { + return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + } + uvm.vpmemDevicesMultiMapped[0] = pmem + } else { + dev := newDefaultVPMemInfo(opts.RootFSFile, "/") + uvm.vpmemDevicesDefault[0] = dev } } diff --git a/internal/uvm/types.go b/internal/uvm/types.go index 24b632865c..1e568db98b 100644 --- a/internal/uvm/types.go +++ b/internal/uvm/types.go @@ -7,12 +7,13 @@ import ( "sync" "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" + "github.com/Microsoft/hcsshim/internal/gcs" "github.com/Microsoft/hcsshim/internal/hcs" "github.com/Microsoft/hcsshim/internal/hcs/schema1" "github.com/Microsoft/hcsshim/internal/hns" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" - "golang.org/x/sys/windows" ) // | WCOW | LCOW @@ -21,14 +22,6 @@ import ( // Read-Only Layer | VSMB | VPMEM // Mapped Directory | VSMB | PLAN9 -// vpmemInfo is an internal structure used for determining VPMem devices mapped to -// a Linux utility VM. -type vpmemInfo struct { - hostPath string - uvmPath string - refCount uint32 -} - type nicInfo struct { ID string Endpoint *hns.HNSEndpoint @@ -81,9 +74,11 @@ type UtilityVM struct { // VPMEM devices that are mapped into a Linux UVM. These are used for read-only layers, or for // booting from VHD. - vpmemDevices [MaxVPMEMCount]*vpmemInfo // Limited by ACPI size. - vpmemMaxCount uint32 // The max number of VPMem devices. - vpmemMaxSizeBytes uint64 // The max size of the layer in bytes per vPMem device. + vpmemMaxCount uint32 // The max number of VPMem devices. + vpmemMaxSizeBytes uint64 // The max size of the layer in bytes per vPMem device. 
+ vpmemMultiMapping bool // Enable mapping multiple VHDs onto a single VPMem device + vpmemDevicesDefault [MaxVPMEMCount]*vPMemInfoDefault + vpmemDevicesMultiMapped [MaxVPMEMCount]*vPMemInfoMulti // SCSI devices that are mapped into a Windows or Linux utility VM scsiLocations [4][64]*SCSIMount // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though. diff --git a/internal/uvm/vpmem.go b/internal/uvm/vpmem.go index 6d883220eb..061ea489b9 100644 --- a/internal/uvm/vpmem.go +++ b/internal/uvm/vpmem.go @@ -2,166 +2,190 @@ package uvm import ( "context" - "errors" "fmt" "os" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/guestrequest" "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/requesttype" - "github.com/sirupsen/logrus" ) const ( - lcowVPMEMLayerFmt = "/run/layers/p%d" + lcowDefaultVPMemLayerFmt = "/run/layers/p%d" ) var ( - // ErrMaxVPMEMLayerSize is the error returned when the size of `hostPath` is - // greater than the max vPMEM layer size set at create time. - ErrMaxVPMEMLayerSize = errors.New("layer size is to large for VPMEM max size") + // ErrMaxVPMemLayerSize is the error returned when the size of `hostPath` is + // greater than the max vPMem layer size set at create time. + ErrMaxVPMemLayerSize = errors.New("layer size is to large for VPMEM max size") ) -// findNextVPMEM finds the next available VPMem slot. +type vPMemInfoDefault struct { + hostPath string + uvmPath string + refCount uint32 +} + +func newDefaultVPMemInfo(hostPath, uvmPath string) *vPMemInfoDefault { + return &vPMemInfoDefault{ + hostPath: hostPath, + uvmPath: uvmPath, + refCount: 1, + } +} + +// findNextVPMemSlot finds next available VPMem slot. // -// The lock MUST be held when calling this function. 
-func (uvm *UtilityVM) findNextVPMEM(ctx context.Context, hostPath string) (uint32, error) { +// Lock MUST be held when calling this function. +func (uvm *UtilityVM) findNextVPMemSlot(ctx context.Context, hostPath string) (uint32, error) { for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - if uvm.vpmemDevices[i] == nil { + if uvm.vpmemDevicesDefault[i] == nil { log.G(ctx).WithFields(logrus.Fields{ "hostPath": hostPath, "deviceNumber": i, - }).Debug("allocated VPMEM location") + }).Debug("allocated VPMem location") return i, nil } } return 0, ErrNoAvailableLocation } -// Lock must be held when calling this function -func (uvm *UtilityVM) findVPMEMDevice(ctx context.Context, findThisHostPath string) (uint32, error) { +// findVPMemSlot looks up `findThisHostPath` in already mounted VPMem devices +// +// Lock MUST be held when calling this function +func (uvm *UtilityVM) findVPMemSlot(ctx context.Context, findThisHostPath string) (uint32, error) { for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - if vi := uvm.vpmemDevices[i]; vi != nil && vi.hostPath == findThisHostPath { + if vi := uvm.vpmemDevicesDefault[i]; vi != nil && vi.hostPath == findThisHostPath { log.G(ctx).WithFields(logrus.Fields{ "hostPath": vi.hostPath, "uvmPath": vi.uvmPath, "refCount": vi.refCount, "deviceNumber": i, - }).Debug("found VPMEM location") + }).Debug("found VPMem location") return i, nil } } return 0, ErrNotAttached } -// AddVPMEM adds a VPMEM disk to a utility VM at the next available location and +// addVPMemDefault adds a VPMem disk to a utility VM at the next available location and // returns the UVM path where the layer was mounted. 
-func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string) (_ string, err error) { - if uvm.operatingSystem != "linux" { - return "", errNotSupported +func (uvm *UtilityVM) addVPMemDefault(ctx context.Context, hostPath string) (_ string, err error) { + if devNumber, err := uvm.findVPMemSlot(ctx, hostPath); err == nil { + device := uvm.vpmemDevicesDefault[devNumber] + device.refCount++ + return device.uvmPath, nil } - uvm.m.Lock() - defer uvm.m.Unlock() + fi, err := os.Stat(hostPath) + if err != nil { + return "", err + } + if uint64(fi.Size()) > uvm.vpmemMaxSizeBytes { + return "", ErrMaxVPMemLayerSize + } - var deviceNumber uint32 - deviceNumber, err = uvm.findVPMEMDevice(ctx, hostPath) + deviceNumber, err := uvm.findNextVPMemSlot(ctx, hostPath) if err != nil { - // We are going to add it so make sure it fits on vPMEM - fi, err := os.Stat(hostPath) - if err != nil { - return "", err - } - if uint64(fi.Size()) > uvm.vpmemMaxSizeBytes { - return "", ErrMaxVPMEMLayerSize - } + return "", err + } + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: hcsschema.VirtualPMemDevice{ + HostPath: hostPath, + ReadOnly: true, + ImageFormat: "Vhd1", + }, + ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), + } + uvmPath := fmt.Sprintf(lcowDefaultVPMemLayerFmt, deviceNumber) + modification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, + RequestType: requesttype.Add, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: uvmPath, + }, + } - // It doesn't exist, so we're going to allocate and hot-add it - deviceNumber, err = uvm.findNextVPMEM(ctx, hostPath) - if err != nil { - return "", err - } + if err := uvm.modify(ctx, modification); err != nil { + return "", errors.Errorf("uvm::addVPMemDefault: failed to modify utility VM configuration: %s", err) + } - modification := &hcsschema.ModifySettingRequest{ - 
RequestType: requesttype.Add, - Settings: hcsschema.VirtualPMemDevice{ - HostPath: hostPath, - ReadOnly: true, - ImageFormat: "Vhd1", - }, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - } + uvm.vpmemDevicesDefault[deviceNumber] = newDefaultVPMemInfo(hostPath, uvmPath) + return uvmPath, nil +} + +// removeVPMemDefault removes a VPMem disk from a Utility VM. If the `hostPath` is not +// attached returns `ErrNotAttached`. +func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) error { + deviceNumber, err := uvm.findVPMemSlot(ctx, hostPath) + if err != nil { + return err + } + + device := uvm.vpmemDevicesDefault[deviceNumber] + if device.refCount > 1 { + device.refCount-- + return nil + } - uvmPath := fmt.Sprintf(lcowVPMEMLayerFmt, deviceNumber) - modification.GuestRequest = guestrequest.GuestRequest{ + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), + GuestRequest: guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeVPMemDevice, - RequestType: requesttype.Add, + RequestType: requesttype.Remove, Settings: guestrequest.LCOWMappedVPMemDevice{ DeviceNumber: deviceNumber, - MountPath: uvmPath, + MountPath: device.uvmPath, }, - } + }, + } + if err := uvm.modify(ctx, modification); err != nil { + return errors.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) + } + log.G(ctx).WithFields(logrus.Fields{ + "hostPath": device.hostPath, + "uvmPath": device.uvmPath, + "refCount": device.refCount, + "deviceNumber": deviceNumber, + }).Debug("removed VPMEM location") - if err := uvm.modify(ctx, modification); err != nil { - return "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err) - } + uvm.vpmemDevicesDefault[deviceNumber] = nil - uvm.vpmemDevices[deviceNumber] = &vpmemInfo{ - hostPath: hostPath, - uvmPath: uvmPath, - 
refCount: 1, - } - return uvmPath, nil - } - device := uvm.vpmemDevices[deviceNumber] - device.refCount++ - return device.uvmPath, nil + return nil } -// RemoveVPMEM removes a VPMEM disk from a Utility VM. If the `hostPath` is not -// attached returns `ErrNotAttached`. -func (uvm *UtilityVM) RemoveVPMEM(ctx context.Context, hostPath string) (err error) { +func (uvm *UtilityVM) AddVPMem(ctx context.Context, hostPath string) (string, error) { if uvm.operatingSystem != "linux" { - return errNotSupported + return "", errNotSupported } uvm.m.Lock() defer uvm.m.Unlock() - deviceNumber, err := uvm.findVPMEMDevice(ctx, hostPath) - if err != nil { - return err + if uvm.vpmemMultiMapping { + return uvm.addVPMemMappedDevice(ctx, hostPath) } + return uvm.addVPMemDefault(ctx, hostPath) +} - device := uvm.vpmemDevices[deviceNumber] - if device.refCount == 1 { - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeVPMemDevice, - RequestType: requesttype.Remove, - Settings: guestrequest.LCOWMappedVPMemDevice{ - DeviceNumber: deviceNumber, - MountPath: device.uvmPath, - }, - }, - } +func (uvm *UtilityVM) RemoveVPMem(ctx context.Context, hostPath string) error { + if uvm.operatingSystem != "linux" { + return errNotSupported + } - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) - } - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": device.hostPath, - "uvmPath": device.uvmPath, - "refCount": device.refCount, - "deviceNumber": deviceNumber, - }).Debug("removed VPMEM location") - uvm.vpmemDevices[deviceNumber] = nil - } else { - device.refCount-- + uvm.m.Lock() + defer uvm.m.Unlock() + + if uvm.vpmemMultiMapping { + return uvm.removeVPMemMappedDevice(ctx, hostPath) } - return nil + 
return uvm.removeVPMemDefault(ctx, hostPath) } diff --git a/internal/uvm/vpmem_mapped.go b/internal/uvm/vpmem_mapped.go new file mode 100644 index 0000000000..3faeff94db --- /dev/null +++ b/internal/uvm/vpmem_mapped.go @@ -0,0 +1,307 @@ +package uvm + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/Microsoft/hcsshim/ext4/tar2ext4" + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/memory" + "github.com/Microsoft/hcsshim/internal/requesttype" +) + +const ( + PageSize = 0x1000 + MaxMappedDeviceCount = 1024 +) + +const lcowPackedVPMemLayerFmt = "/run/layers/p%d-%d-%d" + +type mappedDeviceInfo struct { + vPMemInfoDefault + mappedRegion memory.MappedRegion + sizeInBytes uint64 +} + +type vPMemInfoMulti struct { + memory.PoolAllocator + maxSize uint64 + maxMappedDeviceCount uint32 + mappings map[string]*mappedDeviceInfo +} + +func newVPMemMappedDevice(hostPath, uvmPath string, sizeBytes uint64, memReg memory.MappedRegion) *mappedDeviceInfo { + return &mappedDeviceInfo{ + vPMemInfoDefault: vPMemInfoDefault{ + hostPath: hostPath, + uvmPath: uvmPath, + refCount: 1, + }, + mappedRegion: memReg, + sizeInBytes: sizeBytes, + } +} + +func newPackedVPMemDevice() *vPMemInfoMulti { + return &vPMemInfoMulti{ + PoolAllocator: memory.NewPoolMemoryAllocator(), + maxSize: DefaultVPMemSizeBytes, + mappings: make(map[string]*mappedDeviceInfo), + maxMappedDeviceCount: MaxMappedDeviceCount, + } +} + +func pageAlign(t uint64) uint64 { + if t%PageSize == 0 { + return t + } + return (t/PageSize + 1) * PageSize +} + +// fileSystemSize retrieves ext4 fs SuperBlock and calculates the size of the actual file system +func fileSystemSize(vhdPath string) (uint64, error) { + sb, err := tar2ext4.ReadExt4SuperBlock(vhdPath) + if err != 
nil { + return 0, err + } + blockSize := uint64(1024 * (1 << sb.LogBlockSize)) + fsSize := blockSize * uint64(sb.BlocksCountLow) + return pageAlign(fsSize), nil +} + +// newMappedVPMemModifyRequest creates an hcsschema.ModifySettingsRequest to modify VPMem devices/mappings +// for the multi-mapping setup +func newMappedVPMemModifyRequest(ctx context.Context, rType string, deviceNumber uint32, md *mappedDeviceInfo, uvm *UtilityVM) (*hcsschema.ModifySettingRequest, error) { + request := &hcsschema.ModifySettingRequest{ + RequestType: rType, + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, + RequestType: rType, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: md.uvmPath, + MappingInfo: &guestrequest.LCOWMappedLayer{ + DeviceOffsetInBytes: md.mappedRegion.Offset(), + DeviceSizeInBytes: md.sizeInBytes, + }, + }, + }, + } + + pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] + switch rType { + case requesttype.Add: + if pmem == nil { + request.Settings = hcsschema.VirtualPMemDevice{ + ReadOnly: true, + HostPath: md.hostPath, + ImageFormat: "Vhd1", + } + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber) + } else { + request.Settings = hcsschema.VirtualPMemMapping{ + HostPath: md.hostPath, + ImageFormat: "Vhd1", + } + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) + } + case requesttype.Remove: + if pmem == nil { + return nil, errors.Errorf("no device found at location %d", deviceNumber) + } + if len(pmem.mappings) == 1 { + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber) + } else { + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) + } + default: + return nil, errors.New("unsupported request type") + } + + log.G(ctx).WithFields(logrus.Fields{ + "deviceNumber": 
deviceNumber, + "hostPath": md.hostPath, + "uvmPath": md.uvmPath, + }).Debugf("new mapped VPMem modify request: %v", request) + return request, nil +} + +// mapVHDLayer adds `device` to mappings +func (pmem *vPMemInfoMulti) mapVHDLayer(ctx context.Context, device *mappedDeviceInfo) (err error) { + if md, ok := pmem.mappings[device.hostPath]; ok { + md.refCount++ + return nil + } + + log.G(ctx).WithFields(logrus.Fields{ + "hostPath": device.hostPath, + "mountPath": device.uvmPath, + "deviceOffset": device.mappedRegion.Offset(), + "deviceSize": device.sizeInBytes, + }).Debug("mapped new device") + + pmem.mappings[device.hostPath] = device + return nil +} + +// unmapVHDLayer removes mapped device with `hostPath` from mappings and releases allocated memory +func (pmem *vPMemInfoMulti) unmapVHDLayer(ctx context.Context, hostPath string) (err error) { + dev, ok := pmem.mappings[hostPath] + if !ok { + return ErrNotAttached + } + + if dev.refCount > 1 { + dev.refCount-- + return nil + } + + if err := pmem.Release(dev.mappedRegion); err != nil { + return err + } + log.G(ctx).WithFields(logrus.Fields{ + "hostPath": dev.hostPath, + }).Debugf("Done releasing resources: %s", dev.hostPath) + delete(pmem.mappings, hostPath) + return nil +} + +// findVPMemMappedDevice finds a VHD device that's been mapped on VPMem surface +func (uvm *UtilityVM) findVPMemMappedDevice(ctx context.Context, hostPath string) (uint32, *mappedDeviceInfo, error) { + for i := uint32(0); i < uvm.vpmemMaxCount; i++ { + vi := uvm.vpmemDevicesMultiMapped[i] + if vi != nil { + if vhd, ok := vi.mappings[hostPath]; ok { + log.G(ctx).WithFields(logrus.Fields{ + "deviceNumber": i, + "hostPath": hostPath, + "uvmPath": vhd.uvmPath, + "refCount": vhd.refCount, + "deviceSize": vhd.sizeInBytes, + "deviceOffset": vhd.mappedRegion.Offset(), + }).Debug("found mapped VHD") + return i, vhd, nil + } + } + } + return 0, nil, ErrNotAttached +} + +// allocateNextVPMemMappedDeviceLocation allocates a memory region with a minimum 
offset on the VPMem surface, +// where the device with a given `devSize` can be mapped. +func (uvm *UtilityVM) allocateNextVPMemMappedDeviceLocation(ctx context.Context, devSize uint64) (uint32, memory.MappedRegion, error) { + // device size has to be page aligned + devSize = pageAlign(devSize) + + for i := uint32(0); i < uvm.vpmemMaxCount; i++ { + pmem := uvm.vpmemDevicesMultiMapped[i] + if pmem == nil { + pmem = newPackedVPMemDevice() + uvm.vpmemDevicesMultiMapped[i] = pmem + } + + if len(pmem.mappings) >= int(pmem.maxMappedDeviceCount) { + continue + } + + reg, err := pmem.Allocate(devSize) + if err != nil { + continue + } + log.G(ctx).WithFields(logrus.Fields{ + "deviceNumber": i, + "deviceOffset": reg.Offset(), + "deviceSize": devSize, + }).Debug("found offset for mapped VHD on an existing VPMem device") + return i, reg, nil + } + return 0, nil, ErrNoAvailableLocation +} + +// addVPMemMappedDevice adds container layer as a mapped device, first mapped device is added as a regular +// VPMem device, but subsequent additions will call into mapping APIs +// +// Lock MUST be held when calling this function +func (uvm *UtilityVM) addVPMemMappedDevice(ctx context.Context, hostPath string) (_ string, err error) { + if _, dev, err := uvm.findVPMemMappedDevice(ctx, hostPath); err == nil { + dev.refCount++ + return dev.uvmPath, nil + } + + devSize, err := fileSystemSize(hostPath) + if err != nil { + return "", err + } + deviceNumber, memReg, err := uvm.allocateNextVPMemMappedDeviceLocation(ctx, devSize) + if err != nil { + return "", err + } + defer func() { + if err != nil { + pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] + if err := pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debugf("failed to reclaim pmem region: %s", err) + } + } + }() + + uvmPath := fmt.Sprintf(lcowPackedVPMemLayerFmt, deviceNumber, memReg.Offset(), devSize) + md := newVPMemMappedDevice(hostPath, uvmPath, devSize, memReg) + modification, err := newMappedVPMemModifyRequest(ctx, 
requesttype.Add, deviceNumber, md, uvm) + if err := uvm.modify(ctx, modification); err != nil { + return "", errors.Errorf("uvm::addVPMemMappedDevice: failed to modify utility VM configuration: %s", err) + } + defer func() { + if err != nil { + rmRequest, _ := newMappedVPMemModifyRequest(ctx, requesttype.Remove, deviceNumber, md, uvm) + if err := uvm.modify(ctx, rmRequest); err != nil { + log.G(ctx).WithError(err).Debugf("failed to rollback modification") + } + } + }() + + pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] + if err := pmem.mapVHDLayer(ctx, md); err != nil { + return "", errors.Wrapf(err, "failed to update internal state") + } + return uvmPath, nil +} + +// removeVPMemMappedDevice removes a mapped container layer, if the layer is the last to be removed, removes +// VPMem device instead +// +// Lock MUST be held when calling this function +func (uvm *UtilityVM) removeVPMemMappedDevice(ctx context.Context, hostPath string) error { + devNum, md, err := uvm.findVPMemMappedDevice(ctx, hostPath) + if err != nil { + return err + } + if md.refCount > 1 { + md.refCount-- + return nil + } + + modification, err := newMappedVPMemModifyRequest(ctx, requesttype.Remove, devNum, md, uvm) + if err != nil { + return err + } + + if err := uvm.modify(ctx, modification); err != nil { + return errors.Errorf("failed to remove packed VPMem %s from UVM %s: %s", md.hostPath, uvm.id, err) + } + + pmem := uvm.vpmemDevicesMultiMapped[devNum] + if err := pmem.unmapVHDLayer(ctx, hostPath); err != nil { + log.G(ctx).WithError(err).Debugf("failed unmapping VHD layer %s", hostPath) + } + if len(pmem.mappings) == 0 { + uvm.vpmemDevicesMultiMapped[devNum] = nil + } + return nil +} diff --git a/internal/uvm/vpmem_mapped_test.go b/internal/uvm/vpmem_mapped_test.go new file mode 100644 index 0000000000..b3d1a119d0 --- /dev/null +++ b/internal/uvm/vpmem_mapped_test.go @@ -0,0 +1,82 @@ +package uvm + +import ( + "context" + "testing" + + "github.com/Microsoft/hcsshim/internal/memory" +) + 
+func setupNewVPMemScenario(ctx context.Context, t *testing.T, size uint64, hostPath, uvmPath string) (*vPMemInfoMulti, *mappedDeviceInfo) { + pmem := newPackedVPMemDevice() + memReg, err := pmem.Allocate(size) + if err != nil { + t.Fatalf("failed to setup multi-mapping VPMem Scenario: %s", err) + } + mappedDev := newVPMemMappedDevice(hostPath, uvmPath, size, memReg) + + if err := pmem.mapVHDLayer(ctx, mappedDev); err != nil { + t.Errorf("unexpected error: %s", err) + } + + // do some basic checks + md, ok := pmem.mappings[hostPath] + if !ok { + t.Fatalf("mapping '%s' not added", hostPath) + } + if md.hostPath != hostPath { + t.Fatalf("expected hostPath=%s, got hostPath=%s", hostPath, md.hostPath) + } + if md.uvmPath != uvmPath { + t.Fatalf("expected uvmPath=%s, got uvmPath=%s", uvmPath, md.uvmPath) + } + if md.refCount != 1 { + t.Fatalf("expected refCount=1, got refCount=%d", md.refCount) + } + + return pmem, md +} + +func Test_VPMem_MapDevice_New(t *testing.T) { + // basic scenario already validated in the helper function + setupNewVPMemScenario(context.TODO(), t, memory.MegaByte, "foo", "bar") +} + +func Test_VPMem_UnmapDevice_With_Removal(t *testing.T) { + pmem, _ := setupNewVPMemScenario(context.TODO(), t, memory.MegaByte, "foo", "bar") + + err := pmem.unmapVHDLayer(context.TODO(), "foo") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if m, ok := pmem.mappings["foo"]; ok { + t.Fatalf("mapping should've been removed: %v", m) + } +} + +func Test_VPMem_UnmapDevice_Without_Removal(t *testing.T) { + pmem, mappedDevice := setupNewVPMemScenario(context.TODO(), t, memory.MegaByte, "foo", "bar") + err := pmem.mapVHDLayer(context.TODO(), mappedDevice) + if err != nil { + t.Fatalf("unexpected error when mapping device: %s", err) + } + m, ok := pmem.mappings["foo"] + if !ok { + t.Fatalf("mapping not found") + } + if m.refCount != 2 { + t.Fatalf("expected refCount=2, got refCount=%d", m.refCount) + } + + err = pmem.unmapVHDLayer(context.TODO(), "foo") + if 
err != nil { + t.Fatalf("error unmapping device: %s", err) + } + m, ok = pmem.mappings["foo"] + if !ok { + t.Fatalf("mapping should still be present") + } + if m.refCount != 1 { + t.Fatalf("expected refCount=1, got refCount=%d", m.refCount) + } +} diff --git a/test/cri-containerd/container_layers_packing_test.go b/test/cri-containerd/container_layers_packing_test.go new file mode 100644 index 0000000000..4131e454bb --- /dev/null +++ b/test/cri-containerd/container_layers_packing_test.go @@ -0,0 +1,190 @@ +// +build functional + +package cri_containerd + +import ( + "bufio" + "bytes" + "context" + "fmt" + "strings" + "testing" + + "github.com/Microsoft/hcsshim/internal/shimdiag" + "github.com/Microsoft/hcsshim/osversion" + testutilities "github.com/Microsoft/hcsshim/test/functional/utilities" +) + +const ( + ubuntu1804 = "ubuntu:18.04" + ubuntu70ExtraLayers = "cplatpublic.azurecr.io/ubuntu70extra:18.04" + alpine70ExtraLayers = "cplatpublic.azurecr.io/alpine70extra:latest" +) + +func filterStrings(input []string, include string) []string { + var result []string + for _, str := range input { + if strings.Contains(str, include) { + result = append(result, str) + } + } + return result +} + +func shimDiagExec(ctx context.Context, t *testing.T, podID string, cmd []string) string { + shimName := fmt.Sprintf("k8s.io-%s", podID) + shim, err := shimdiag.GetShim(shimName) + if err != nil { + t.Fatalf("failed to find shim %v: %v", shimName, err) + } + shimClient := shimdiag.NewShimDiagClient(shim) + + bufOut := &bytes.Buffer{} + bw := bufio.NewWriter(bufOut) + bufErr := &bytes.Buffer{} + bwErr := bufio.NewWriter(bufErr) + + exitCode, err := execInHost(ctx, shimClient, cmd, nil, bw, bwErr) + if err != nil { + t.Fatalf("failed to exec request in the host with: %v and %v", err, bufErr.String()) + } + if exitCode != 0 { + t.Fatalf("exec request in host failed with exit code %v: %v", exitCode, bufErr.String()) + } + + return strings.TrimSpace(bufOut.String()) +} + +func 
validateTargets(ctx context.Context, t *testing.T, deviceNumber int, podID string, expected int) { + dmDiag := shimDiagExec(ctx, t, podID, []string{"ls", "-l", "/dev/mapper"}) + dmPattern := fmt.Sprintf("dm-linear-pmem%d", deviceNumber) + dmLines := filterStrings(strings.Split(dmDiag, "\n"), dmPattern) + + lrDiag := shimDiagExec(ctx, t, podID, []string{"ls", "-l", "/run/layers"}) + lrPattern := fmt.Sprintf("p%d", deviceNumber) + lrLines := filterStrings(strings.Split(lrDiag, "\n"), lrPattern) + if len(lrLines) != len(dmLines) { + t.Fatalf("number of layers and device-mapper targets mismatch:\n%s\n%s", dmDiag, lrDiag) + } + + if len(dmLines) != expected { + t.Fatalf("expected %d layers, got %d.\n%s\n%s", expected, len(dmLines), dmDiag, lrDiag) + } +} + +func Test_Container_Layer_Packing_On_VPMem(t *testing.T) { + testutilities.RequiresBuild(t, osversion.V19H1) + + client := newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + requireFeatures(t, featureLCOW) + + // use ubuntu to make sure that multiple container layers will be mapped properly + pullRequiredLcowImages(t, []string{imageLcowK8sPause, ubuntu1804}) + + type config struct { + rootfsType string + deviceNumber int + } + + for _, scenario := range []config{ + { + rootfsType: "initrd", + deviceNumber: 0, + }, + { + rootfsType: "vhd", + deviceNumber: 1, + }, + } { + t.Run(fmt.Sprintf("PreferredRootFSType-%s", scenario.rootfsType), func(t *testing.T) { + annotations := map[string]string{ + "io.microsoft.virtualmachine.lcow.preferredrootfstype": scenario.rootfsType, + } + podReq := getRunPodSandboxRequest(t, lcowRuntimeHandler, annotations) + podID := runPodSandbox(t, client, ctx, podReq) + defer removePodSandbox(t, client, ctx, podID) + + cmd := []string{"bash", "-c", "while true; do echo 'Hello, World!'; sleep 1; done"} + contReq := getCreateContainerRequest(podID, "ubuntu_latest", ubuntu1804, cmd, podReq.Config) + containerID := createContainer(t, client, ctx, 
contReq) + defer removeContainer(t, client, ctx, containerID) + + startContainer(t, client, ctx, containerID) + + // check initial targets + // NOTE: as of 02/03/2021, ubuntu:18.04 image has 3 image layers and k8s pause container has 1 layer + validateTargets(ctx, t, scenario.deviceNumber, podID, 4) + + // stop container + stopContainer(t, client, ctx, containerID) + // only pause container layer should be mounted at this point + validateTargets(ctx, t, scenario.deviceNumber, podID, 1) + }) + } +} + +func Test_Many_Container_Layers_Supported_On_VPMem(t *testing.T) { + testutilities.RequiresBuild(t, osversion.V19H1) + + client := newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + requireFeatures(t, featureLCOW) + + pullRequiredLcowImages(t, []string{imageLcowK8sPause, alpine70ExtraLayers, ubuntu70ExtraLayers}) + + podReq := getRunPodSandboxRequest(t, lcowRuntimeHandler, nil) + podID := runPodSandbox(t, client, ctx, podReq) + defer removePodSandbox(t, client, ctx, podID) + + cmd := []string{"bash", "-c", "while true; do echo 'Hello, World!'; sleep 1; done"} + + contReq1 := getCreateContainerRequest(podID, "ubuntu70extra", ubuntu70ExtraLayers, cmd, podReq.Config) + containerID1 := createContainer(t, client, ctx, contReq1) + defer removeContainer(t, client, ctx, containerID1) + startContainer(t, client, ctx, containerID1) + defer stopContainer(t, client, ctx, containerID1) + + cmd = []string{"ash", "-c", "while true; do echo 'Hello, World!'; sleep 1; done"} + contReq2 := getCreateContainerRequest(podID, "alpine70extra", alpine70ExtraLayers, cmd, podReq.Config) + containerID2 := createContainer(t, client, ctx, contReq2) + defer removeContainer(t, client, ctx, containerID2) + startContainer(t, client, ctx, containerID2) + defer stopContainer(t, client, ctx, containerID2) +} + +func Test_Annotation_Disable_Multi_Mapping(t *testing.T) { + testutilities.RequiresBuild(t, osversion.V19H1) + + client := 
newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + requireFeatures(t, featureLCOW) + + pullRequiredLcowImages(t, []string{imageLcowK8sPause, alpine70ExtraLayers}) + + annotations := map[string]string{ + "io.microsoft.virtualmachine.lcow.vpmem.nomultimapping": "true", + } + podReq := getRunPodSandboxRequest(t, lcowRuntimeHandler, annotations) + podID := runPodSandbox(t, client, ctx, podReq) + defer removePodSandbox(t, client, ctx, podID) + + cmd := []string{"bash", "-c", "while true; do echo 'Hello, World!'; sleep 1; done"} + contReq := getCreateContainerRequest(podID, "ubuntu", ubuntu70ExtraLayers, cmd, podReq.Config) + containerID := createContainer(t, client, ctx, contReq) + defer removeContainer(t, client, ctx, containerID) + startContainer(t, client, ctx, containerID) + defer stopContainer(t, client, ctx, containerID) + + dmDiag := shimDiagExec(ctx, t, podID, []string{"ls", "-l", "/dev/mapper"}) + filtered := filterStrings(strings.Split(dmDiag, "\n"), "dm-linear") + if len(filtered) > 0 { + t.Fatalf("no linear devices should've been created.\n%s", dmDiag) + } +} diff --git a/test/cri-containerd/test-images/layer_packing_alpine/Dockerfile b/test/cri-containerd/test-images/layer_packing_alpine/Dockerfile new file mode 100644 index 0000000000..501ae35290 --- /dev/null +++ b/test/cri-containerd/test-images/layer_packing_alpine/Dockerfile @@ -0,0 +1,78 @@ +# This Dockerfile builds a docker image based on alpine:latest, which includes +# additional 70 layers. 
The image is used in cri-containerd tests and validates
+# VPMem multi-mapping and LCOW container layer packing feature
+#
+# The image is available at cplatpublic.azurecr.io/alpine70extra:latest
+
+FROM alpine:latest
+
+RUN echo "extra alpine layer #1" >> ~/extra-layer-1.txt
+RUN echo "extra alpine layer #2" >> ~/extra-layer-2.txt
+RUN echo "extra alpine layer #3" >> ~/extra-layer-3.txt
+RUN echo "extra alpine layer #4" >> ~/extra-layer-4.txt
+RUN echo "extra alpine layer #5" >> ~/extra-layer-5.txt
+RUN echo "extra alpine layer #6" >> ~/extra-layer-6.txt
+RUN echo "extra alpine layer #7" >> ~/extra-layer-7.txt
+RUN echo "extra alpine layer #8" >> ~/extra-layer-8.txt
+RUN echo "extra alpine layer #9" >> ~/extra-layer-9.txt
+RUN echo "extra alpine layer #10" >> ~/extra-layer-10.txt
+RUN echo "extra alpine layer #11" >> ~/extra-layer-11.txt
+RUN echo "extra alpine layer #12" >> ~/extra-layer-12.txt
+RUN echo "extra alpine layer #13" >> ~/extra-layer-13.txt
+RUN echo "extra alpine layer #14" >> ~/extra-layer-14.txt
+RUN echo "extra alpine layer #15" >> ~/extra-layer-15.txt
+RUN echo "extra alpine layer #16" >> ~/extra-layer-16.txt
+RUN echo "extra alpine layer #17" >> ~/extra-layer-17.txt
+RUN echo "extra alpine layer #18" >> ~/extra-layer-18.txt
+RUN echo "extra alpine layer #19" >> ~/extra-layer-19.txt
+RUN echo "extra alpine layer #20" >> ~/extra-layer-20.txt
+RUN echo "extra alpine layer #21" >> ~/extra-layer-21.txt
+RUN echo "extra alpine layer #22" >> ~/extra-layer-22.txt
+RUN echo "extra alpine layer #23" >> ~/extra-layer-23.txt
+RUN echo "extra alpine layer #24" >> ~/extra-layer-24.txt
+RUN echo "extra alpine layer #25" >> ~/extra-layer-25.txt
+RUN echo "extra alpine layer #26" >> ~/extra-layer-26.txt
+RUN echo "extra alpine layer #27" >> ~/extra-layer-27.txt
+RUN echo "extra alpine layer #28" >> ~/extra-layer-28.txt
+RUN echo "extra alpine layer #29" >> ~/extra-layer-29.txt
+RUN echo "extra alpine layer #30" >> ~/extra-layer-30.txt
+RUN echo "extra alpine layer #31" >> ~/extra-layer-31.txt
+RUN echo "extra alpine layer #32" >> ~/extra-layer-32.txt
+RUN echo "extra alpine layer #33" >> ~/extra-layer-33.txt
+RUN echo "extra alpine layer #34" >> ~/extra-layer-34.txt
+RUN echo "extra alpine layer #35" >> ~/extra-layer-35.txt
+RUN echo "extra alpine layer #36" >> ~/extra-layer-36.txt
+RUN echo "extra alpine layer #37" >> ~/extra-layer-37.txt
+RUN echo "extra alpine layer #38" >> ~/extra-layer-38.txt
+RUN echo "extra alpine layer #39" >> ~/extra-layer-39.txt
+RUN echo "extra alpine layer #40" >> ~/extra-layer-40.txt
+RUN echo "extra alpine layer #41" >> ~/extra-layer-41.txt
+RUN echo "extra alpine layer #42" >> ~/extra-layer-42.txt
+RUN echo "extra alpine layer #43" >> ~/extra-layer-43.txt
+RUN echo "extra alpine layer #44" >> ~/extra-layer-44.txt
+RUN echo "extra alpine layer #45" >> ~/extra-layer-45.txt
+RUN echo "extra alpine layer #46" >> ~/extra-layer-46.txt
+RUN echo "extra alpine layer #47" >> ~/extra-layer-47.txt
+RUN echo "extra alpine layer #48" >> ~/extra-layer-48.txt
+RUN echo "extra alpine layer #49" >> ~/extra-layer-49.txt
+RUN echo "extra alpine layer #50" >> ~/extra-layer-50.txt
+RUN echo "extra alpine layer #51" >> ~/extra-layer-51.txt
+RUN echo "extra alpine layer #52" >> ~/extra-layer-52.txt
+RUN echo "extra alpine layer #53" >> ~/extra-layer-53.txt
+RUN echo "extra alpine layer #54" >> ~/extra-layer-54.txt
+RUN echo "extra alpine layer #55" >> ~/extra-layer-55.txt
+RUN echo "extra alpine layer #56" >> ~/extra-layer-56.txt
+RUN echo "extra alpine layer #57" >> ~/extra-layer-57.txt
+RUN echo "extra alpine layer #58" >> ~/extra-layer-58.txt
+RUN echo "extra alpine layer #59" >> ~/extra-layer-59.txt
+RUN echo "extra alpine layer #60" >> ~/extra-layer-60.txt
+RUN echo "extra alpine layer #61" >> ~/extra-layer-61.txt
+RUN echo "extra alpine layer #62" >> ~/extra-layer-62.txt
+RUN echo "extra alpine layer #63" >> ~/extra-layer-63.txt
+RUN echo "extra alpine layer #64" >> ~/extra-layer-64.txt
+RUN echo "extra alpine layer #65" >> ~/extra-layer-65.txt
+RUN echo "extra alpine layer #66" >> ~/extra-layer-66.txt
+RUN echo "extra alpine layer #67" >> ~/extra-layer-67.txt
+RUN echo "extra alpine layer #68" >> ~/extra-layer-68.txt
+RUN echo "extra alpine layer #69" >> ~/extra-layer-69.txt
+RUN echo "extra alpine layer #70" >> ~/extra-layer-70.txt
diff --git a/test/cri-containerd/test-images/layer_packing_ubuntu/Dockerfile b/test/cri-containerd/test-images/layer_packing_ubuntu/Dockerfile
new file mode 100644
index 0000000000..068f8dd935
--- /dev/null
+++ b/test/cri-containerd/test-images/layer_packing_ubuntu/Dockerfile
@@ -0,0 +1,78 @@
+# This Dockerfile builds a docker image based on ubuntu:18.04, which includes
+# additional 70 layers. The image is used in cri-containerd tests and validates
+# VPMem multi-mapping and LCOW container layer packing feature
+#
+# The image is available at cplatpublic.azurecr.io/ubuntu70extra:18.04
+
+FROM ubuntu:18.04
+
+RUN echo "extra ubuntu layer #1" >> ~/extra-layer-1.txt
+RUN echo "extra ubuntu layer #2" >> ~/extra-layer-2.txt
+RUN echo "extra ubuntu layer #3" >> ~/extra-layer-3.txt
+RUN echo "extra ubuntu layer #4" >> ~/extra-layer-4.txt
+RUN echo "extra ubuntu layer #5" >> ~/extra-layer-5.txt
+RUN echo "extra ubuntu layer #6" >> ~/extra-layer-6.txt
+RUN echo "extra ubuntu layer #7" >> ~/extra-layer-7.txt
+RUN echo "extra ubuntu layer #8" >> ~/extra-layer-8.txt
+RUN echo "extra ubuntu layer #9" >> ~/extra-layer-9.txt
+RUN echo "extra ubuntu layer #10" >> ~/extra-layer-10.txt
+RUN echo "extra ubuntu layer #11" >> ~/extra-layer-11.txt
+RUN echo "extra ubuntu layer #12" >> ~/extra-layer-12.txt
+RUN echo "extra ubuntu layer #13" >> ~/extra-layer-13.txt
+RUN echo "extra ubuntu layer #14" >> ~/extra-layer-14.txt
+RUN echo "extra ubuntu layer #15" >> ~/extra-layer-15.txt
+RUN echo "extra ubuntu layer #16" >> ~/extra-layer-16.txt
+RUN echo "extra ubuntu layer #17" >> ~/extra-layer-17.txt
+RUN echo "extra ubuntu layer #18" >> ~/extra-layer-18.txt
+RUN echo "extra ubuntu layer #19" >> ~/extra-layer-19.txt +RUN echo "extra ubuntu layer #20" >> ~/extra-layer-20.txt +RUN echo "extra ubuntu layer #21" >> ~/extra-layer-21.txt +RUN echo "extra ubuntu layer #22" >> ~/extra-layer-22.txt +RUN echo "extra ubuntu layer #23" >> ~/extra-layer-23.txt +RUN echo "extra ubuntu layer #24" >> ~/extra-layer-24.txt +RUN echo "extra ubuntu layer #25" >> ~/extra-layer-25.txt +RUN echo "extra ubuntu layer #26" >> ~/extra-layer-26.txt +RUN echo "extra ubuntu layer #27" >> ~/extra-layer-27.txt +RUN echo "extra ubuntu layer #28" >> ~/extra-layer-28.txt +RUN echo "extra ubuntu layer #29" >> ~/extra-layer-29.txt +RUN echo "extra ubuntu layer #30" >> ~/extra-layer-30.txt +RUN echo "extra ubuntu layer #31" >> ~/extra-layer-31.txt +RUN echo "extra ubuntu layer #32" >> ~/extra-layer-32.txt +RUN echo "extra ubuntu layer #33" >> ~/extra-layer-33.txt +RUN echo "extra ubuntu layer #34" >> ~/extra-layer-34.txt +RUN echo "extra ubuntu layer #35" >> ~/extra-layer-35.txt +RUN echo "extra ubuntu layer #36" >> ~/extra-layer-36.txt +RUN echo "extra ubuntu layer #37" >> ~/extra-layer-37.txt +RUN echo "extra ubuntu layer #38" >> ~/extra-layer-38.txt +RUN echo "extra ubuntu layer #39" >> ~/extra-layer-39.txt +RUN echo "extra ubuntu layer #40" >> ~/extra-layer-40.txt +RUN echo "extra ubuntu layer #41" >> ~/extra-layer-41.txt +RUN echo "extra ubuntu layer #42" >> ~/extra-layer-42.txt +RUN echo "extra ubuntu layer #43" >> ~/extra-layer-43.txt +RUN echo "extra ubuntu layer #44" >> ~/extra-layer-44.txt +RUN echo "extra ubuntu layer #45" >> ~/extra-layer-45.txt +RUN echo "extra ubuntu layer #46" >> ~/extra-layer-46.txt +RUN echo "extra ubuntu layer #47" >> ~/extra-layer-47.txt +RUN echo "extra ubuntu layer #48" >> ~/extra-layer-48.txt +RUN echo "extra ubuntu layer #49" >> ~/extra-layer-49.txt +RUN echo "extra ubuntu layer #50" >> ~/extra-layer-50.txt +RUN echo "extra ubuntu layer #51" >> ~/extra-layer-51.txt +RUN echo "extra ubuntu layer #52" >> 
~/extra-layer-52.txt +RUN echo "extra ubuntu layer #53" >> ~/extra-layer-53.txt +RUN echo "extra ubuntu layer #54" >> ~/extra-layer-54.txt +RUN echo "extra ubuntu layer #55" >> ~/extra-layer-55.txt +RUN echo "extra ubuntu layer #56" >> ~/extra-layer-56.txt +RUN echo "extra ubuntu layer #57" >> ~/extra-layer-57.txt +RUN echo "extra ubuntu layer #58" >> ~/extra-layer-58.txt +RUN echo "extra ubuntu layer #59" >> ~/extra-layer-59.txt +RUN echo "extra ubuntu layer #60" >> ~/extra-layer-60.txt +RUN echo "extra ubuntu layer #61" >> ~/extra-layer-61.txt +RUN echo "extra ubuntu layer #62" >> ~/extra-layer-62.txt +RUN echo "extra ubuntu layer #63" >> ~/extra-layer-63.txt +RUN echo "extra ubuntu layer #64" >> ~/extra-layer-64.txt +RUN echo "extra ubuntu layer #65" >> ~/extra-layer-65.txt +RUN echo "extra ubuntu layer #66" >> ~/extra-layer-66.txt +RUN echo "extra ubuntu layer #67" >> ~/extra-layer-67.txt +RUN echo "extra ubuntu layer #68" >> ~/extra-layer-68.txt +RUN echo "extra ubuntu layer #69" >> ~/extra-layer-69.txt +RUN echo "extra ubuntu layer #70" >> ~/extra-layer-70.txt diff --git a/test/functional/uvm_vpmem_test.go b/test/functional/uvm_vpmem_test.go index 14de2f8feb..3d55881e92 100644 --- a/test/functional/uvm_vpmem_test.go +++ b/test/functional/uvm_vpmem_test.go @@ -33,7 +33,7 @@ func TestVPMEM(t *testing.T) { defer os.RemoveAll(tempDir) for i := 0; i < int(iterations); i++ { - uvmPath, err := u.AddVPMEM(ctx, filepath.Join(tempDir, "layer.vhd")) + uvmPath, err := u.AddVPMem(ctx, filepath.Join(tempDir, "layer.vhd")) if err != nil { t.Fatalf("AddVPMEM failed: %s", err) } @@ -42,7 +42,7 @@ func TestVPMEM(t *testing.T) { // Remove them all for i := 0; i < int(iterations); i++ { - if err := u.RemoveVPMEM(ctx, filepath.Join(tempDir, "layer.vhd")); err != nil { + if err := u.RemoveVPMem(ctx, filepath.Join(tempDir, "layer.vhd")); err != nil { t.Fatalf("RemoveVPMEM failed: %s", err) } } diff --git a/test/go.sum b/test/go.sum index 51c80c3442..da9ee1427b 100644 --- 
a/test/go.sum +++ b/test/go.sum @@ -9,6 +9,7 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -47,7 +48,9 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -142,6 +145,7 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ github.com/containerd/nri 
v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -182,6 +186,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= @@ -195,9 +200,13 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli 
v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -234,11 +243,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod 
h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -268,6 +281,7 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -294,6 +308,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -302,6 +317,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -314,6 +330,7 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -342,6 +359,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod 
h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -363,14 +381,15 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -382,6 +401,7 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -398,26 +418,33 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= 
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -487,6 +514,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -628,6 +656,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -639,7 +668,9 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -657,6 +688,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= @@ -677,6 +709,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -702,6 +735,7 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -730,9 +764,11 @@ golang.org/x/time 
v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -746,6 +782,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -764,7 +801,11 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -782,10 +823,12 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -805,8 +848,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -851,6 +895,7 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go 
v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -860,8 +905,12 @@ k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6 h1:iXX0K2pRrbR8yXbZtDK/bSnmg/uSqIFiVJK1x4LUOMc= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -869,6 +918,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go new file mode 100644 index 0000000000..1b914b6e44 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go @@ -0,0 +1,195 @@ +package dmverity + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "fmt" + "io" + "os" + + "github.com/pkg/errors" + + "github.com/Microsoft/hcsshim/ext4/internal/compactext4" +) + +const ( + blockSize = compactext4.BlockSize +) + +var salt = bytes.Repeat([]byte{0}, 32) + +var ( + ErrSuperBlockReadFailure = errors.New("failed to read dm-verity super block") + ErrSuperBlockParseFailure = errors.New("failed to parse dm-verity super block") + ErrRootHashReadFailure = errors.New("failed to read dm-verity root hash") +) + +type dmveritySuperblock struct { + /* (0) "verity\0\0" */ + Signature [8]byte + /* (8) superblock version, 1 */ + Version uint32 + /* (12) 0 - Chrome OS, 1 - normal */ + HashType uint32 + /* (16) UUID of hash device */ + UUID [16]byte + /* (32) Name of the hash algorithm (e.g., sha256) */ + Algorithm [32]byte + /* (64) The data block size in bytes */ + DataBlockSize uint32 + /* (68) The hash block size in bytes */ + HashBlockSize uint32 + /* (72) The number of data blocks */ + DataBlocks uint64 + /* (80) Size of the salt */ + SaltSize uint16 + /* (82) Padding */ + _ [6]byte + /* (88) The salt */ + Salt [256]byte + /* (344) Padding */ + _ 
[168]byte +} + +// VerityInfo is minimal exported version of dmveritySuperblock +type VerityInfo struct { + // Offset in blocks on hash device + HashOffsetInBlocks int64 + // Set to true, when dm-verity super block is also written on the hash device + SuperBlock bool + RootDigest string + Salt string + Algorithm string + DataBlockSize uint32 + HashBlockSize uint32 + DataBlocks uint64 +} + +// MerkleTree constructs dm-verity hash-tree for a given byte array with a fixed salt (0-byte) and algorithm (sha256). +func MerkleTree(data []byte) ([]byte, error) { + layers := make([][]byte, 0) + + currentLevel := bytes.NewBuffer(data) + + for currentLevel.Len() != blockSize { + blocks := currentLevel.Len() / blockSize + nextLevel := bytes.NewBuffer(make([]byte, 0)) + + for i := 0; i < blocks; i++ { + block := make([]byte, blockSize) + _, err := currentLevel.Read(block) + if err != nil { + return nil, errors.Wrap(err, "failed to read data block") + } + h := hash2(salt, block) + nextLevel.Write(h) + } + + padding := bytes.Repeat([]byte{0}, blockSize-(nextLevel.Len()%blockSize)) + nextLevel.Write(padding) + + currentLevel = nextLevel + layers = append(layers, currentLevel.Bytes()) + } + + var tree = bytes.NewBuffer(make([]byte, 0)) + for i := len(layers) - 1; i >= 0; i-- { + _, err := tree.Write(layers[i]) + if err != nil { + return nil, errors.Wrap(err, "failed to write merkle tree") + } + } + + return tree.Bytes(), nil +} + +// RootHash computes root hash of dm-verity hash-tree +func RootHash(tree []byte) []byte { + return hash2(salt, tree[:blockSize]) +} + +// NewDMVeritySuperblock returns a dm-verity superblock for a device with a given size, salt, algorithm and versions are +// fixed. 
+func NewDMVeritySuperblock(size uint64) *dmveritySuperblock {
+	superblock := &dmveritySuperblock{
+		Version:       1,
+		HashType:      1,
+		UUID:          generateUUID(),
+		DataBlockSize: blockSize,
+		HashBlockSize: blockSize,
+		DataBlocks:    size / blockSize,
+		SaltSize:      uint16(len(salt)),
+	}
+
+	copy(superblock.Signature[:], "verity")
+	copy(superblock.Algorithm[:], "sha256")
+	copy(superblock.Salt[:], salt)
+
+	return superblock
+}
+
+// hash2 returns sha256(a || b).
+// NOTE(review): append(a, b...) may write into a's backing array when it has spare capacity;
+// callers pass the shared package-level salt slice as a — confirm it never has spare capacity,
+// or copy before appending.
+func hash2(a, b []byte) []byte {
+	h := sha256.New()
+	h.Write(append(a, b...))
+	return h.Sum(nil)
+}
+
+// generateUUID returns 16 random bytes; it panics if the system RNG fails.
+func generateUUID() [16]byte {
+	res := [16]byte{}
+	if _, err := rand.Read(res[:]); err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// ReadDMVerityInfo extracts dm-verity super block information and merkle tree root hash
+// from the VHD at vhdPath. offsetInBytes is where the dm-verity superblock starts, i.e.
+// the size of the preceding ext4 data area.
+func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error) {
+	vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer vhd.Close()
+
+	// Skip the ext4 data to get to dm-verity super block
+	if s, err := vhd.Seek(offsetInBytes, io.SeekStart); err != nil || s != offsetInBytes {
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to seek dm-verity super block")
+		}
+		return nil, errors.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s)
+	}
+
+	// Read one full block containing the superblock and decode it.
+	block := make([]byte, blockSize)
+	if s, err := vhd.Read(block); err != nil || s != blockSize {
+		if err != nil {
+			return nil, errors.Wrapf(ErrSuperBlockReadFailure, "%s", err)
+		}
+		return nil, errors.Wrapf(ErrSuperBlockReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
+	}
+
+	dmvSB := &dmveritySuperblock{}
+	b := bytes.NewBuffer(block)
+	if err := binary.Read(b, binary.LittleEndian, dmvSB); err != nil {
+		return nil, errors.Wrapf(ErrSuperBlockParseFailure, "%s", err)
+	}
+
+	// read the merkle tree root
+	if s, err := vhd.Read(block); err != nil || s != blockSize {
+		if err != nil {
+			return nil, errors.Wrapf(ErrRootHashReadFailure, "%s", err)
+		}
+		return nil, errors.Wrapf(ErrRootHashReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
+	}
+	rootHash := hash2(dmvSB.Salt[:dmvSB.SaltSize], block)
+	// HashOffsetInBlocks mirrors DataBlocks: the hash tree is taken to start
+	// immediately after the data region. Digest and salt are hex-encoded.
+	return &VerityInfo{
+		RootDigest:         fmt.Sprintf("%x", rootHash),
+		Algorithm:          string(bytes.Trim(dmvSB.Algorithm[:], "\x00")),
+		Salt:               fmt.Sprintf("%x", dmvSB.Salt[:dmvSB.SaltSize]),
+		HashOffsetInBlocks: int64(dmvSB.DataBlocks),
+		SuperBlock:         true,
+		DataBlocks:         dmvSB.DataBlocks,
+		DataBlockSize:      dmvSB.DataBlockSize,
+		HashBlockSize:      blockSize,
+	}, nil
+}
diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go
new file mode 100644
index 0000000000..f40ac8f989
--- /dev/null
+++ b/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go
@@ -0,0 +1,1328 @@
+package compactext4
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Microsoft/hcsshim/ext4/internal/format"
+)
+
+// Writer writes a compact ext4 file system.
+type Writer struct {
+	f  io.ReadWriteSeeker
+	bw *bufio.Writer
+	// inodes holds every inode by number (1-based; index i stores inode i+1).
+	inodes   []*inode
+	curName  string
+	curInode *inode
+	// pos is the current absolute write position in bytes.
+	pos                  int64
+	dataWritten, dataMax int64
+	// err is sticky: once set, subsequent writes are no-ops returning it.
+	err               error
+	initialized       bool
+	supportInlineData bool
+	maxDiskSize       int64
+	gdBlocks          uint32
+}
+
+// Mode flags for Linux files.
+const (
+	S_IXOTH = format.S_IXOTH
+	S_IWOTH = format.S_IWOTH
+	S_IROTH = format.S_IROTH
+	S_IXGRP = format.S_IXGRP
+	S_IWGRP = format.S_IWGRP
+	S_IRGRP = format.S_IRGRP
+	S_IXUSR = format.S_IXUSR
+	S_IWUSR = format.S_IWUSR
+	S_IRUSR = format.S_IRUSR
+	S_ISVTX = format.S_ISVTX
+	S_ISGID = format.S_ISGID
+	S_ISUID = format.S_ISUID
+	S_IFIFO = format.S_IFIFO
+	S_IFCHR = format.S_IFCHR
+	S_IFDIR = format.S_IFDIR
+	S_IFBLK = format.S_IFBLK
+	S_IFREG = format.S_IFREG
+	S_IFLNK = format.S_IFLNK
+	S_IFSOCK = format.S_IFSOCK
+
+	TypeMask = format.TypeMask
+)
+
+// inode is the in-memory state of one ext4 inode while the image is being built.
+type inode struct {
+	Size int64
+	// Encoded filesystem timestamps (seconds in the low 34 bits,
+	// nanoseconds above — see timeToFsTime).
+	Atime, Ctime, Mtime, Crtime uint64
+	Number                      format.InodeNumber
+	Mode                        uint16
+	Uid, Gid                    uint32
+	LinkCount                   uint32
+	// XattrBlock is the block number of the external xattr block, 0 if none.
+	XattrBlock uint32
+	BlockCount uint32
+	Devmajor, Devminor uint32
+	Flags              format.InodeFlag
+	// Data holds inline file data, a short symlink target, or the serialized
+	// extent tree, depending on Mode and Flags.
+	Data []byte
+	// XattrInline is the in-inode xattr area (magic header + entries), if any.
+	XattrInline []byte
+	// Children is non-nil only for directories.
+	Children directory
+}
+
+// FileType returns the S_IF* type bits of the inode's mode.
+func (node *inode) FileType() uint16 {
+	return node.Mode & format.TypeMask
+}
+
+// IsDir reports whether the inode is a directory.
+func (node *inode) IsDir() bool {
+	return node.FileType() == S_IFDIR
+}
+
+// A File represents a file to be added to an ext4 file system.
+type File struct { + Linkname string + Size int64 + Mode uint16 + Uid, Gid uint32 + Atime, Ctime, Mtime, Crtime time.Time + Devmajor, Devminor uint32 + Xattrs map[string][]byte +} + +const ( + inodeFirst = 11 + inodeLostAndFound = inodeFirst + + BlockSize = 4096 + blocksPerGroup = BlockSize * 8 + inodeSize = 256 + maxInodesPerGroup = BlockSize * 8 // Limited by the inode bitmap + inodesPerGroupIncrement = BlockSize / inodeSize + + defaultMaxDiskSize = 16 * 1024 * 1024 * 1024 // 16GB + maxMaxDiskSize = 16 * 1024 * 1024 * 1024 * 1024 // 16TB + + groupDescriptorSize = 32 // Use the small group descriptor + groupsPerDescriptorBlock = BlockSize / groupDescriptorSize + + maxFileSize = 128 * 1024 * 1024 * 1024 // 128GB file size maximum for now + smallSymlinkSize = 59 // max symlink size that goes directly in the inode + maxBlocksPerExtent = 0x8000 // maximum number of blocks in an extent + inodeDataSize = 60 + inodeUsedSize = 152 // fields through CrtimeExtra + inodeExtraSize = inodeSize - inodeUsedSize + xattrInodeOverhead = 4 + 4 // magic number + empty next entry value + xattrBlockOverhead = 32 + 4 // header + empty next entry value + inlineDataXattrOverhead = xattrInodeOverhead + 16 + 4 // entry + "data" + inlineDataSize = inodeDataSize + inodeExtraSize - inlineDataXattrOverhead +) + +type exceededMaxSizeError struct { + Size int64 +} + +func (err exceededMaxSizeError) Error() string { + return fmt.Sprintf("disk exceeded maximum size of %d bytes", err.Size) +} + +var directoryEntrySize = binary.Size(format.DirectoryEntry{}) +var extraIsize = uint16(inodeUsedSize - 128) + +type directory map[string]*inode + +func splitFirst(p string) (string, string) { + n := strings.IndexByte(p, '/') + if n >= 0 { + return p[:n], p[n+1:] + } + return p, "" +} + +func (w *Writer) findPath(root *inode, p string) *inode { + inode := root + for inode != nil && len(p) != 0 { + name, rest := splitFirst(p) + p = rest + inode = inode.Children[name] + } + return inode +} + +func 
timeToFsTime(t time.Time) uint64 { + if t.IsZero() { + return 0 + } + s := t.Unix() + if s < -0x80000000 { + return 0x80000000 + } + if s > 0x37fffffff { + return 0x37fffffff + } + return uint64(s) | uint64(t.Nanosecond())<<34 +} + +func fsTimeToTime(t uint64) time.Time { + if t == 0 { + return time.Time{} + } + s := int64(t & 0x3ffffffff) + if s > 0x7fffffff && s < 0x100000000 { + s = int64(int32(uint32(s))) + } + return time.Unix(s, int64(t>>34)) +} + +func (w *Writer) getInode(i format.InodeNumber) *inode { + if i == 0 || int(i) > len(w.inodes) { + return nil + } + return w.inodes[i-1] +} + +var xattrPrefixes = []struct { + Index uint8 + Prefix string +}{ + {2, "system.posix_acl_access"}, + {3, "system.posix_acl_default"}, + {8, "system.richacl"}, + {7, "system."}, + {1, "user."}, + {4, "trusted."}, + {6, "security."}, +} + +func compressXattrName(name string) (uint8, string) { + for _, p := range xattrPrefixes { + if strings.HasPrefix(name, p.Prefix) { + return p.Index, name[len(p.Prefix):] + } + } + return 0, name +} + +func decompressXattrName(index uint8, name string) string { + for _, p := range xattrPrefixes { + if index == p.Index { + return p.Prefix + name + } + } + return name +} + +func hashXattrEntry(name string, value []byte) uint32 { + var hash uint32 + for i := 0; i < len(name); i++ { + hash = (hash << 5) ^ (hash >> 27) ^ uint32(name[i]) + } + + for i := 0; i+3 < len(value); i += 4 { + hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(value[i:i+4]) + } + + if len(value)%4 != 0 { + var last [4]byte + copy(last[:], value[len(value)&^3:]) + hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(last[:]) + } + return hash +} + +type xattr struct { + Name string + Index uint8 + Value []byte +} + +func (x *xattr) EntryLen() int { + return (len(x.Name)+3)&^3 + 16 +} + +func (x *xattr) ValueLen() int { + return (len(x.Value) + 3) &^ 3 +} + +type xattrState struct { + inode, block []xattr + inodeLeft, blockLeft int +} + +func (s 
*xattrState) init() { + s.inodeLeft = inodeExtraSize - xattrInodeOverhead + s.blockLeft = BlockSize - xattrBlockOverhead +} + +func (s *xattrState) addXattr(name string, value []byte) bool { + index, name := compressXattrName(name) + x := xattr{ + Index: index, + Name: name, + Value: value, + } + length := x.EntryLen() + x.ValueLen() + if s.inodeLeft >= length { + s.inode = append(s.inode, x) + s.inodeLeft -= length + } else if s.blockLeft >= length { + s.block = append(s.block, x) + s.blockLeft -= length + } else { + return false + } + return true +} + +func putXattrs(xattrs []xattr, b []byte, offsetDelta uint16) { + offset := uint16(len(b)) + offsetDelta + eb := b + db := b + for _, xattr := range xattrs { + vl := xattr.ValueLen() + offset -= uint16(vl) + eb[0] = uint8(len(xattr.Name)) + eb[1] = xattr.Index + binary.LittleEndian.PutUint16(eb[2:], offset) + binary.LittleEndian.PutUint32(eb[8:], uint32(len(xattr.Value))) + binary.LittleEndian.PutUint32(eb[12:], hashXattrEntry(xattr.Name, xattr.Value)) + copy(eb[16:], xattr.Name) + eb = eb[xattr.EntryLen():] + copy(db[len(db)-vl:], xattr.Value) + db = db[:len(db)-vl] + } +} + +func getXattrs(b []byte, xattrs map[string][]byte, offsetDelta uint16) { + eb := b + for len(eb) != 0 { + nameLen := eb[0] + if nameLen == 0 { + break + } + index := eb[1] + offset := binary.LittleEndian.Uint16(eb[2:]) - offsetDelta + valueLen := binary.LittleEndian.Uint32(eb[8:]) + attr := xattr{ + Index: index, + Name: string(eb[16 : 16+nameLen]), + Value: b[offset : uint32(offset)+valueLen], + } + xattrs[decompressXattrName(index, attr.Name)] = attr.Value + eb = eb[attr.EntryLen():] + } +} + +func (w *Writer) writeXattrs(inode *inode, state *xattrState) error { + // Write the inline attributes. 
+ if len(state.inode) != 0 { + inode.XattrInline = make([]byte, inodeExtraSize) + binary.LittleEndian.PutUint32(inode.XattrInline[0:], format.XAttrHeaderMagic) // Magic + putXattrs(state.inode, inode.XattrInline[4:], 0) + } + + // Write the block attributes. If there was previously an xattr block, then + // rewrite it even if it is now empty. + if len(state.block) != 0 || inode.XattrBlock != 0 { + sort.Slice(state.block, func(i, j int) bool { + return state.block[i].Index < state.block[j].Index || + len(state.block[i].Name) < len(state.block[j].Name) || + state.block[i].Name < state.block[j].Name + }) + + var b [BlockSize]byte + binary.LittleEndian.PutUint32(b[0:], format.XAttrHeaderMagic) // Magic + binary.LittleEndian.PutUint32(b[4:], 1) // ReferenceCount + binary.LittleEndian.PutUint32(b[8:], 1) // Blocks + putXattrs(state.block, b[32:], 32) + + orig := w.block() + if inode.XattrBlock == 0 { + inode.XattrBlock = orig + inode.BlockCount++ + } else { + // Reuse the original block. + w.seekBlock(inode.XattrBlock) + defer w.seekBlock(orig) + } + + if _, err := w.write(b[:]); err != nil { + return err + } + } + + return nil +} + +func (w *Writer) write(b []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + if w.pos+int64(len(b)) > w.maxDiskSize { + w.err = exceededMaxSizeError{w.maxDiskSize} + return 0, w.err + } + n, err := w.bw.Write(b) + w.pos += int64(n) + w.err = err + return n, err +} + +func (w *Writer) zero(n int64) (int64, error) { + if w.err != nil { + return 0, w.err + } + if w.pos+int64(n) > w.maxDiskSize { + w.err = exceededMaxSizeError{w.maxDiskSize} + return 0, w.err + } + n, err := io.CopyN(w.bw, zero, n) + w.pos += n + w.err = err + return n, err +} + +func (w *Writer) makeInode(f *File, node *inode) (*inode, error) { + mode := f.Mode + if mode&format.TypeMask == 0 { + mode |= format.S_IFREG + } + typ := mode & format.TypeMask + ino := format.InodeNumber(len(w.inodes) + 1) + if node == nil { + node = &inode{ + Number: ino, + } + if typ 
== S_IFDIR { + node.Children = make(directory) + node.LinkCount = 1 // A directory is linked to itself. + } + } else if node.Flags&format.InodeFlagExtents != 0 { + // Since we cannot deallocate or reuse blocks, don't allow updates that + // would invalidate data that has already been written. + return nil, errors.New("cannot overwrite file with non-inline data") + } + node.Mode = mode + node.Uid = f.Uid + node.Gid = f.Gid + node.Flags = format.InodeFlagHugeFile + node.Atime = timeToFsTime(f.Atime) + node.Ctime = timeToFsTime(f.Ctime) + node.Mtime = timeToFsTime(f.Mtime) + node.Crtime = timeToFsTime(f.Crtime) + node.Devmajor = f.Devmajor + node.Devminor = f.Devminor + node.Data = nil + node.XattrInline = nil + + var xstate xattrState + xstate.init() + + var size int64 + switch typ { + case format.S_IFREG: + size = f.Size + if f.Size > maxFileSize { + return nil, fmt.Errorf("file too big: %d > %d", f.Size, int64(maxFileSize)) + } + if f.Size <= inlineDataSize && w.supportInlineData { + node.Data = make([]byte, f.Size) + extra := 0 + if f.Size > inodeDataSize { + extra = int(f.Size - inodeDataSize) + } + // Add a dummy entry for now. + if !xstate.addXattr("system.data", node.Data[:extra]) { + panic("not enough room for inline data") + } + node.Flags |= format.InodeFlagInlineData + } + case format.S_IFLNK: + node.Mode |= 0777 // Symlinks should appear as ugw rwx + size = int64(len(f.Linkname)) + if size <= smallSymlinkSize { + // Special case: small symlinks go directly in Block without setting + // an inline data flag. + node.Data = make([]byte, len(f.Linkname)) + copy(node.Data, f.Linkname) + } + case format.S_IFDIR, format.S_IFIFO, format.S_IFSOCK, format.S_IFCHR, format.S_IFBLK: + default: + return nil, fmt.Errorf("invalid mode %o", mode) + } + + // Accumulate the extended attributes. + if len(f.Xattrs) != 0 { + // Sort the xattrs to avoid non-determinism in map iteration. 
+ var xattrs []string + for name := range f.Xattrs { + xattrs = append(xattrs, name) + } + sort.Strings(xattrs) + for _, name := range xattrs { + if !xstate.addXattr(name, f.Xattrs[name]) { + return nil, fmt.Errorf("could not fit xattr %s", name) + } + } + } + + if err := w.writeXattrs(node, &xstate); err != nil { + return nil, err + } + + node.Size = size + if typ == format.S_IFLNK && size > smallSymlinkSize { + // Write the link name as data. + w.startInode("", node, size) + if _, err := w.Write([]byte(f.Linkname)); err != nil { + return nil, err + } + if err := w.finishInode(); err != nil { + return nil, err + } + } + + if int(node.Number-1) >= len(w.inodes) { + w.inodes = append(w.inodes, node) + } + return node, nil +} + +func (w *Writer) root() *inode { + return w.getInode(format.InodeRoot) +} + +func (w *Writer) lookup(name string, mustExist bool) (*inode, *inode, string, error) { + root := w.root() + cleanname := path.Clean("/" + name)[1:] + if len(cleanname) == 0 { + return root, root, "", nil + } + dirname, childname := path.Split(cleanname) + if len(childname) == 0 || len(childname) > 0xff { + return nil, nil, "", fmt.Errorf("%s: invalid name", name) + } + dir := w.findPath(root, dirname) + if dir == nil || !dir.IsDir() { + return nil, nil, "", fmt.Errorf("%s: path not found", name) + } + child := dir.Children[childname] + if child == nil && mustExist { + return nil, nil, "", fmt.Errorf("%s: file not found", name) + } + return dir, child, childname, nil +} + +// CreateWithParents adds a file to the file system creating the parent directories in the path if +// they don't exist (like `mkdir -p`). These non existing parent directories are created +// with the same permissions as that of it's parent directory. It is expected that the a +// call to make these parent directories will be made at a later point with the correct +// permissions, at that time the permissions of these directories will be updated. 
+func (w *Writer) CreateWithParents(name string, f *File) error {
+	// Flush any file whose contents are still being streamed in.
+	if err := w.finishInode(); err != nil {
+		return err
+	}
+	// go through the directories in the path one by one and create the
+	// parent directories if they don't exist.
+	cleanname := path.Clean("/" + name)[1:]
+	parentDirs, _ := path.Split(cleanname)
+	currentPath := ""
+	root := w.root()
+	dirname := ""
+	for parentDirs != "" {
+		dirname, parentDirs = splitFirst(parentDirs)
+		currentPath += "/" + dirname
+		if _, ok := root.Children[dirname]; !ok {
+			// Missing component: create it, inheriting ownership and mode —
+			// including the S_IFDIR type bits — from the current parent, so
+			// Create produces a directory.
+			f := &File{
+				Mode:     root.Mode,
+				Atime:    time.Now(),
+				Mtime:    time.Now(),
+				Ctime:    time.Now(),
+				Crtime:   time.Now(),
+				Size:     0,
+				Uid:      root.Uid,
+				Gid:      root.Gid,
+				Devmajor: root.Devmajor,
+				Devminor: root.Devminor,
+				Xattrs:   make(map[string][]byte),
+			}
+			if err := w.Create(currentPath, f); err != nil {
+				return fmt.Errorf("failed while creating parent directories: %w", err)
+			}
+		}
+		// NOTE(review): assumes any pre-existing entry on the path is a directory;
+		// a regular file here would leave root as a non-directory (nil Children)
+		// for the next iteration — confirm callers never hit this.
+		root = root.Children[dirname]
+	}
+	return w.Create(name, f)
+}
+
+// Create adds a file to the file system.
+func (w *Writer) Create(name string, f *File) error {
+	// Flush any file whose contents are still being streamed in.
+	if err := w.finishInode(); err != nil {
+		return err
+	}
+	dir, existing, childname, err := w.lookup(name, false)
+	if err != nil {
+		return err
+	}
+	// reuse, when non-nil, is an existing inode that may be overwritten in place:
+	// a directory may only replace a directory; a file is reusable only while it
+	// has no extra hard links (LinkCount < 2).
+	var reuse *inode
+	if existing != nil {
+		if existing.IsDir() {
+			if f.Mode&TypeMask != S_IFDIR {
+				return fmt.Errorf("%s: cannot replace a directory with a file", name)
+			}
+			reuse = existing
+		} else if f.Mode&TypeMask == S_IFDIR {
+			return fmt.Errorf("%s: cannot replace a file with a directory", name)
+		} else if existing.LinkCount < 2 {
+			reuse = existing
+		}
+	} else {
+		if f.Mode&TypeMask == S_IFDIR && dir.LinkCount >= format.MaxLinks {
+			return fmt.Errorf("%s: exceeded parent directory maximum link count", name)
+		}
+	}
+	child, err := w.makeInode(f, reuse)
+	if err != nil {
+		return fmt.Errorf("%s: %s", name, err)
+	}
+	if existing != child {
+		// Rewire the directory entry and fix up link counts; a new subdirectory
+		// also adds a ".." link to the parent.
+		if existing != nil {
+			existing.LinkCount--
+		}
+		dir.Children[childname] = child
+		child.LinkCount++
+		if child.IsDir() {
+			dir.LinkCount++
+		}
+	}
+	// Regular files stream their contents through subsequent Write calls.
+	if child.Mode&format.TypeMask == format.S_IFREG {
+		w.startInode(name, child, f.Size)
+	}
+	return nil
+}
+
+// Link adds a hard link to the file system.
+func (w *Writer) Link(oldname, newname string) error { + if err := w.finishInode(); err != nil { + return err + } + newdir, existing, newchildname, err := w.lookup(newname, false) + if err != nil { + return err + } + if existing != nil && (existing.IsDir() || existing.LinkCount < 2) { + return fmt.Errorf("%s: cannot orphan existing file or directory", newname) + } + + _, oldfile, _, err := w.lookup(oldname, true) + if err != nil { + return err + } + switch oldfile.Mode & format.TypeMask { + case format.S_IFDIR, format.S_IFLNK: + return fmt.Errorf("%s: link target cannot be a directory or symlink: %s", newname, oldname) + } + + if existing != oldfile && oldfile.LinkCount >= format.MaxLinks { + return fmt.Errorf("%s: link target would exceed maximum link count: %s", newname, oldname) + } + + if existing != nil { + existing.LinkCount-- + } + oldfile.LinkCount++ + newdir.Children[newchildname] = oldfile + return nil +} + +// Stat returns information about a file that has been written. +func (w *Writer) Stat(name string) (*File, error) { + if err := w.finishInode(); err != nil { + return nil, err + } + _, node, _, err := w.lookup(name, true) + if err != nil { + return nil, err + } + f := &File{ + Size: node.Size, + Mode: node.Mode, + Uid: node.Uid, + Gid: node.Gid, + Atime: fsTimeToTime(node.Atime), + Ctime: fsTimeToTime(node.Ctime), + Mtime: fsTimeToTime(node.Mtime), + Crtime: fsTimeToTime(node.Crtime), + Devmajor: node.Devmajor, + Devminor: node.Devminor, + } + f.Xattrs = make(map[string][]byte) + if node.XattrBlock != 0 || len(node.XattrInline) != 0 { + if node.XattrBlock != 0 { + orig := w.block() + w.seekBlock(node.XattrBlock) + if w.err != nil { + return nil, w.err + } + var b [BlockSize]byte + _, err := w.f.Read(b[:]) + w.seekBlock(orig) + if err != nil { + return nil, err + } + getXattrs(b[32:], f.Xattrs, 32) + } + if len(node.XattrInline) != 0 { + getXattrs(node.XattrInline[4:], f.Xattrs, 0) + delete(f.Xattrs, "system.data") + } + } + if node.FileType() == 
S_IFLNK { + if node.Size > smallSymlinkSize { + return nil, fmt.Errorf("%s: cannot retrieve link information", name) + } + f.Linkname = string(node.Data) + } + return f, nil +} + +func (w *Writer) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + if w.dataWritten+int64(len(b)) > w.dataMax { + return 0, fmt.Errorf("%s: wrote too much: %d > %d", w.curName, w.dataWritten+int64(len(b)), w.dataMax) + } + + if w.curInode.Flags&format.InodeFlagInlineData != 0 { + copy(w.curInode.Data[w.dataWritten:], b) + w.dataWritten += int64(len(b)) + return len(b), nil + } + + n, err := w.write(b) + w.dataWritten += int64(n) + return n, err +} + +func (w *Writer) startInode(name string, inode *inode, size int64) { + if w.curInode != nil { + panic("inode already in progress") + } + w.curName = name + w.curInode = inode + w.dataWritten = 0 + w.dataMax = size +} + +func (w *Writer) block() uint32 { + return uint32(w.pos / BlockSize) +} + +func (w *Writer) seekBlock(block uint32) { + w.pos = int64(block) * BlockSize + if w.err != nil { + return + } + w.err = w.bw.Flush() + if w.err != nil { + return + } + _, w.err = w.f.Seek(w.pos, io.SeekStart) +} + +func (w *Writer) nextBlock() { + if w.pos%BlockSize != 0 { + // Simplify callers; w.err is updated on failure. 
+ _, _ = w.zero(BlockSize - w.pos%BlockSize) + } +} + +func fillExtents(hdr *format.ExtentHeader, extents []format.ExtentLeafNode, startBlock, offset, inodeSize uint32) { + *hdr = format.ExtentHeader{ + Magic: format.ExtentHeaderMagic, + Entries: uint16(len(extents)), + Max: uint16(cap(extents)), + Depth: 0, + } + for i := range extents { + block := offset + uint32(i)*maxBlocksPerExtent + length := inodeSize - block + if length > maxBlocksPerExtent { + length = maxBlocksPerExtent + } + start := startBlock + block + extents[i] = format.ExtentLeafNode{ + Block: block, + Length: uint16(length), + StartLow: start, + } + } +} + +func (w *Writer) writeExtents(inode *inode) error { + start := w.pos - w.dataWritten + if start%BlockSize != 0 { + panic("unaligned") + } + w.nextBlock() + + startBlock := uint32(start / BlockSize) + blocks := w.block() - startBlock + usedBlocks := blocks + + const extentNodeSize = 12 + const extentsPerBlock = BlockSize/extentNodeSize - 1 + + extents := (blocks + maxBlocksPerExtent - 1) / maxBlocksPerExtent + var b bytes.Buffer + if extents == 0 { + // Nothing to do. 
+ } else if extents <= 4 { + var root struct { + hdr format.ExtentHeader + extents [4]format.ExtentLeafNode + } + fillExtents(&root.hdr, root.extents[:extents], startBlock, 0, blocks) + _ = binary.Write(&b, binary.LittleEndian, root) + } else if extents <= 4*extentsPerBlock { + const extentsPerBlock = BlockSize/extentNodeSize - 1 + extentBlocks := extents/extentsPerBlock + 1 + usedBlocks += extentBlocks + var b2 bytes.Buffer + + var root struct { + hdr format.ExtentHeader + nodes [4]format.ExtentIndexNode + } + root.hdr = format.ExtentHeader{ + Magic: format.ExtentHeaderMagic, + Entries: uint16(extentBlocks), + Max: 4, + Depth: 1, + } + for i := uint32(0); i < extentBlocks; i++ { + root.nodes[i] = format.ExtentIndexNode{ + Block: i * extentsPerBlock * maxBlocksPerExtent, + LeafLow: w.block(), + } + extentsInBlock := extents - i*extentBlocks + if extentsInBlock > extentsPerBlock { + extentsInBlock = extentsPerBlock + } + + var node struct { + hdr format.ExtentHeader + extents [extentsPerBlock]format.ExtentLeafNode + _ [BlockSize - (extentsPerBlock+1)*extentNodeSize]byte + } + + offset := i * extentsPerBlock * maxBlocksPerExtent + fillExtents(&node.hdr, node.extents[:extentsInBlock], startBlock+offset, offset, blocks) + _ = binary.Write(&b2, binary.LittleEndian, node) + if _, err := w.write(b2.Next(BlockSize)); err != nil { + return err + } + } + _ = binary.Write(&b, binary.LittleEndian, root) + } else { + panic("file too big") + } + + inode.Data = b.Bytes() + inode.Flags |= format.InodeFlagExtents + inode.BlockCount += usedBlocks + return w.err +} + +func (w *Writer) finishInode() error { + if !w.initialized { + if err := w.init(); err != nil { + return err + } + } + if w.curInode == nil { + return nil + } + if w.dataWritten != w.dataMax { + return fmt.Errorf("did not write the right amount: %d != %d", w.dataWritten, w.dataMax) + } + + if w.dataMax != 0 && w.curInode.Flags&format.InodeFlagInlineData == 0 { + if err := w.writeExtents(w.curInode); err != nil { + 
return err + } + } + + w.dataWritten = 0 + w.dataMax = 0 + w.curInode = nil + return w.err +} + +func modeToFileType(mode uint16) format.FileType { + switch mode & format.TypeMask { + default: + return format.FileTypeUnknown + case format.S_IFREG: + return format.FileTypeRegular + case format.S_IFDIR: + return format.FileTypeDirectory + case format.S_IFCHR: + return format.FileTypeCharacter + case format.S_IFBLK: + return format.FileTypeBlock + case format.S_IFIFO: + return format.FileTypeFIFO + case format.S_IFSOCK: + return format.FileTypeSocket + case format.S_IFLNK: + return format.FileTypeSymbolicLink + } +} + +type constReader byte + +var zero = constReader(0) + +func (r constReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = byte(r) + } + return len(b), nil +} + +func (w *Writer) writeDirectory(dir, parent *inode) error { + if err := w.finishInode(); err != nil { + return err + } + + // The size of the directory is not known yet. + w.startInode("", dir, 0x7fffffffffffffff) + left := BlockSize + finishBlock := func() error { + if left > 0 { + e := format.DirectoryEntry{ + RecordLength: uint16(left), + } + err := binary.Write(w, binary.LittleEndian, e) + if err != nil { + return err + } + left -= directoryEntrySize + if left < 4 { + panic("not enough space for trailing entry") + } + _, err = io.CopyN(w, zero, int64(left)) + if err != nil { + return err + } + } + left = BlockSize + return nil + } + + writeEntry := func(ino format.InodeNumber, name string) error { + rlb := directoryEntrySize + len(name) + rl := (rlb + 3) & ^3 + if left < rl+12 { + if err := finishBlock(); err != nil { + return err + } + } + e := format.DirectoryEntry{ + Inode: ino, + RecordLength: uint16(rl), + NameLength: uint8(len(name)), + FileType: modeToFileType(w.getInode(ino).Mode), + } + err := binary.Write(w, binary.LittleEndian, e) + if err != nil { + return err + } + _, err = w.Write([]byte(name)) + if err != nil { + return err + } + var zero [4]byte + _, err = 
w.Write(zero[:rl-rlb]) + if err != nil { + return err + } + left -= rl + return nil + } + if err := writeEntry(dir.Number, "."); err != nil { + return err + } + if err := writeEntry(parent.Number, ".."); err != nil { + return err + } + + // Follow e2fsck's convention and sort the children by inode number. + var children []string + for name := range dir.Children { + children = append(children, name) + } + sort.Slice(children, func(i, j int) bool { + left_num := dir.Children[children[i]].Number + right_num := dir.Children[children[j]].Number + + if left_num == right_num { + return children[i] < children[j] + } + return left_num < right_num + }) + + for _, name := range children { + child := dir.Children[name] + if err := writeEntry(child.Number, name); err != nil { + return err + } + } + if err := finishBlock(); err != nil { + return err + } + w.curInode.Size = w.dataWritten + w.dataMax = w.dataWritten + return nil +} + +func (w *Writer) writeDirectoryRecursive(dir, parent *inode) error { + if err := w.writeDirectory(dir, parent); err != nil { + return err + } + + // Follow e2fsck's convention and sort the children by inode number. 
+ var children []string + for name := range dir.Children { + children = append(children, name) + } + sort.Slice(children, func(i, j int) bool { + left_num := dir.Children[children[i]].Number + right_num := dir.Children[children[j]].Number + + if left_num == right_num { + return children[i] < children[j] + } + return left_num < right_num + }) + + for _, name := range children { + child := dir.Children[name] + if child.IsDir() { + if err := w.writeDirectoryRecursive(child, dir); err != nil { + return err + } + } + } + return nil +} + +func (w *Writer) writeInodeTable(tableSize uint32) error { + var b bytes.Buffer + for _, inode := range w.inodes { + if inode != nil { + binode := format.Inode{ + Mode: inode.Mode, + Uid: uint16(inode.Uid & 0xffff), + Gid: uint16(inode.Gid & 0xffff), + SizeLow: uint32(inode.Size & 0xffffffff), + SizeHigh: uint32(inode.Size >> 32), + LinksCount: uint16(inode.LinkCount), + BlocksLow: inode.BlockCount, + Flags: inode.Flags, + XattrBlockLow: inode.XattrBlock, + UidHigh: uint16(inode.Uid >> 16), + GidHigh: uint16(inode.Gid >> 16), + ExtraIsize: uint16(inodeUsedSize - 128), + Atime: uint32(inode.Atime), + AtimeExtra: uint32(inode.Atime >> 32), + Ctime: uint32(inode.Ctime), + CtimeExtra: uint32(inode.Ctime >> 32), + Mtime: uint32(inode.Mtime), + MtimeExtra: uint32(inode.Mtime >> 32), + Crtime: uint32(inode.Crtime), + CrtimeExtra: uint32(inode.Crtime >> 32), + } + switch inode.Mode & format.TypeMask { + case format.S_IFDIR, format.S_IFREG, format.S_IFLNK: + n := copy(binode.Block[:], inode.Data) + if n < len(inode.Data) { + // Rewrite the first xattr with the data. + xattr := [1]xattr{{ + Name: "data", + Index: 7, // "system." 
+ Value: inode.Data[n:], + }} + putXattrs(xattr[:], inode.XattrInline[4:], 0) + } + case format.S_IFBLK, format.S_IFCHR: + dev := inode.Devminor&0xff | inode.Devmajor<<8 | (inode.Devminor&0xffffff00)<<12 + binary.LittleEndian.PutUint32(binode.Block[4:], dev) + } + + _ = binary.Write(&b, binary.LittleEndian, binode) + b.Truncate(inodeUsedSize) + n, _ := b.Write(inode.XattrInline) + _, _ = io.CopyN(&b, zero, int64(inodeExtraSize-n)) + } else { + _, _ = io.CopyN(&b, zero, inodeSize) + } + if _, err := w.write(b.Next(inodeSize)); err != nil { + return err + } + } + rest := tableSize - uint32(len(w.inodes)*inodeSize) + if _, err := w.zero(int64(rest)); err != nil { + return err + } + return nil +} + +// NewWriter returns a Writer that writes an ext4 file system to the provided +// WriteSeeker. +func NewWriter(f io.ReadWriteSeeker, opts ...Option) *Writer { + w := &Writer{ + f: f, + bw: bufio.NewWriterSize(f, 65536*8), + maxDiskSize: defaultMaxDiskSize, + } + for _, opt := range opts { + opt(w) + } + return w +} + +// An Option provides extra options to NewWriter. +type Option func(*Writer) + +// InlineData instructs the Writer to write small files into the inode +// structures directly. This creates smaller images but currently is not +// compatible with DAX. +func InlineData(w *Writer) { + w.supportInlineData = true +} + +// MaximumDiskSize instructs the writer to reserve enough metadata space for the +// specified disk size. If not provided, then 16GB is the default. +func MaximumDiskSize(size int64) Option { + return func(w *Writer) { + if size < 0 || size > maxMaxDiskSize { + w.maxDiskSize = maxMaxDiskSize + } else if size == 0 { + w.maxDiskSize = defaultMaxDiskSize + } else { + w.maxDiskSize = (size + BlockSize - 1) &^ (BlockSize - 1) + } + } +} + +func (w *Writer) init() error { + // Skip the defective block inode. + w.inodes = make([]*inode, 1, 32) + // Create the root directory. 
+ root, _ := w.makeInode(&File{ + Mode: format.S_IFDIR | 0755, + }, nil) + root.LinkCount++ // The root is linked to itself. + // Skip until the first non-reserved inode. + w.inodes = append(w.inodes, make([]*inode, inodeFirst-len(w.inodes)-1)...) + maxBlocks := (w.maxDiskSize-1)/BlockSize + 1 + maxGroups := (maxBlocks-1)/blocksPerGroup + 1 + w.gdBlocks = uint32((maxGroups-1)/groupsPerDescriptorBlock + 1) + + // Skip past the superblock and block descriptor table. + w.seekBlock(1 + w.gdBlocks) + w.initialized = true + + // The lost+found directory is required to exist for e2fsck to pass. + if err := w.Create("lost+found", &File{Mode: format.S_IFDIR | 0700}); err != nil { + return err + } + return w.err +} + +func groupCount(blocks uint32, inodes uint32, inodesPerGroup uint32) uint32 { + inodeBlocksPerGroup := inodesPerGroup * inodeSize / BlockSize + dataBlocksPerGroup := blocksPerGroup - inodeBlocksPerGroup - 2 // save room for the bitmaps + + // Increase the block count to ensure there are enough groups for all the + // inodes. + minBlocks := (inodes-1)/inodesPerGroup*dataBlocksPerGroup + 1 + if blocks < minBlocks { + blocks = minBlocks + } + + return (blocks + dataBlocksPerGroup - 1) / dataBlocksPerGroup +} + +func bestGroupCount(blocks uint32, inodes uint32) (groups uint32, inodesPerGroup uint32) { + groups = 0xffffffff + for ipg := uint32(inodesPerGroupIncrement); ipg <= maxInodesPerGroup; ipg += inodesPerGroupIncrement { + g := groupCount(blocks, inodes, ipg) + if g < groups { + groups = g + inodesPerGroup = ipg + } + } + return +} + +func (w *Writer) Close() error { + if err := w.finishInode(); err != nil { + return err + } + root := w.root() + if err := w.writeDirectoryRecursive(root, root); err != nil { + return err + } + // Finish the last inode (probably a directory). 
+ if err := w.finishInode(); err != nil { + return err + } + + // Write the inode table + inodeTableOffset := w.block() + groups, inodesPerGroup := bestGroupCount(inodeTableOffset, uint32(len(w.inodes))) + err := w.writeInodeTable(groups * inodesPerGroup * inodeSize) + if err != nil { + return err + } + + // Write the bitmaps. + bitmapOffset := w.block() + bitmapSize := groups * 2 + validDataSize := bitmapOffset + bitmapSize + diskSize := validDataSize + minSize := (groups-1)*blocksPerGroup + 1 + if diskSize < minSize { + diskSize = minSize + } + + usedGdBlocks := (groups-1)/groupsPerDescriptorBlock + 1 + if usedGdBlocks > w.gdBlocks { + return exceededMaxSizeError{w.maxDiskSize} + } + + gds := make([]format.GroupDescriptor, w.gdBlocks*groupsPerDescriptorBlock) + inodeTableSizePerGroup := inodesPerGroup * inodeSize / BlockSize + var totalUsedBlocks, totalUsedInodes uint32 + for g := uint32(0); g < groups; g++ { + var b [BlockSize * 2]byte + var dirCount, usedInodeCount, usedBlockCount uint16 + + // Block bitmap + if (g+1)*blocksPerGroup <= validDataSize { + // This group is fully allocated. + for j := range b[:BlockSize] { + b[j] = 0xff + } + usedBlockCount = blocksPerGroup + } else if g*blocksPerGroup < validDataSize { + for j := uint32(0); j < validDataSize-g*blocksPerGroup; j++ { + b[j/8] |= 1 << (j % 8) + usedBlockCount++ + } + } + if g == 0 { + // Unused group descriptor blocks should be cleared. + for j := 1 + usedGdBlocks; j < 1+w.gdBlocks; j++ { + b[j/8] &^= 1 << (j % 8) + usedBlockCount-- + } + } + if g == groups-1 && diskSize%blocksPerGroup != 0 { + // Blocks that aren't present in the disk should be marked as + // allocated. 
+ for j := diskSize % blocksPerGroup; j < blocksPerGroup; j++ { + b[j/8] |= 1 << (j % 8) + usedBlockCount++ + } + } + // Inode bitmap + for j := uint32(0); j < inodesPerGroup; j++ { + ino := format.InodeNumber(1 + g*inodesPerGroup + j) + inode := w.getInode(ino) + if ino < inodeFirst || inode != nil { + b[BlockSize+j/8] |= 1 << (j % 8) + usedInodeCount++ + } + if inode != nil && inode.Mode&format.TypeMask == format.S_IFDIR { + dirCount++ + } + } + _, err := w.write(b[:]) + if err != nil { + return err + } + gds[g] = format.GroupDescriptor{ + BlockBitmapLow: bitmapOffset + 2*g, + InodeBitmapLow: bitmapOffset + 2*g + 1, + InodeTableLow: inodeTableOffset + g*inodeTableSizePerGroup, + UsedDirsCountLow: dirCount, + FreeInodesCountLow: uint16(inodesPerGroup) - usedInodeCount, + FreeBlocksCountLow: blocksPerGroup - usedBlockCount, + } + + totalUsedBlocks += uint32(usedBlockCount) + totalUsedInodes += uint32(usedInodeCount) + } + + // Zero up to the disk size. + _, err = w.zero(int64(diskSize-bitmapOffset-bitmapSize) * BlockSize) + if err != nil { + return err + } + + // Write the block descriptors + w.seekBlock(1) + if w.err != nil { + return w.err + } + err = binary.Write(w.bw, binary.LittleEndian, gds) + if err != nil { + return err + } + + // Write the super block + var blk [BlockSize]byte + b := bytes.NewBuffer(blk[:1024]) + sb := &format.SuperBlock{ + InodesCount: inodesPerGroup * groups, + BlocksCountLow: diskSize, + FreeBlocksCountLow: blocksPerGroup*groups - totalUsedBlocks, + FreeInodesCount: inodesPerGroup*groups - totalUsedInodes, + FirstDataBlock: 0, + LogBlockSize: 2, // 2^(10 + 2) + LogClusterSize: 2, + BlocksPerGroup: blocksPerGroup, + ClustersPerGroup: blocksPerGroup, + InodesPerGroup: inodesPerGroup, + Magic: format.SuperBlockMagic, + State: 1, // cleanly unmounted + Errors: 1, // continue on error? 
+ CreatorOS: 0, // Linux + RevisionLevel: 1, // dynamic inode sizes + FirstInode: inodeFirst, + LpfInode: inodeLostAndFound, + InodeSize: inodeSize, + FeatureCompat: format.CompatSparseSuper2 | format.CompatExtAttr, + FeatureIncompat: format.IncompatFiletype | format.IncompatExtents | format.IncompatFlexBg, + FeatureRoCompat: format.RoCompatLargeFile | format.RoCompatHugeFile | format.RoCompatExtraIsize | format.RoCompatReadonly, + MinExtraIsize: extraIsize, + WantExtraIsize: extraIsize, + LogGroupsPerFlex: 31, + } + if w.supportInlineData { + sb.FeatureIncompat |= format.IncompatInlineData + } + _ = binary.Write(b, binary.LittleEndian, sb) + w.seekBlock(0) + if _, err := w.write(blk[:]); err != nil { + return err + } + w.seekBlock(diskSize) + return w.err +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go new file mode 100644 index 0000000000..9dc4c4e164 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go @@ -0,0 +1,411 @@ +package format + +type SuperBlock struct { + InodesCount uint32 + BlocksCountLow uint32 + RootBlocksCountLow uint32 + FreeBlocksCountLow uint32 + FreeInodesCount uint32 + FirstDataBlock uint32 + LogBlockSize uint32 + LogClusterSize uint32 + BlocksPerGroup uint32 + ClustersPerGroup uint32 + InodesPerGroup uint32 + Mtime uint32 + Wtime uint32 + MountCount uint16 + MaxMountCount uint16 + Magic uint16 + State uint16 + Errors uint16 + MinorRevisionLevel uint16 + LastCheck uint32 + CheckInterval uint32 + CreatorOS uint32 + RevisionLevel uint32 + DefaultReservedUid uint16 + DefaultReservedGid uint16 + FirstInode uint32 + InodeSize uint16 + BlockGroupNr uint16 + FeatureCompat CompatFeature + FeatureIncompat IncompatFeature + FeatureRoCompat RoCompatFeature + UUID [16]uint8 + VolumeName [16]byte + LastMounted [64]byte + AlgorithmUsageBitmap uint32 + PreallocBlocks uint8 + PreallocDirBlocks uint8 + 
ReservedGdtBlocks uint16 + JournalUUID [16]uint8 + JournalInum uint32 + JournalDev uint32 + LastOrphan uint32 + HashSeed [4]uint32 + DefHashVersion uint8 + JournalBackupType uint8 + DescSize uint16 + DefaultMountOpts uint32 + FirstMetaBg uint32 + MkfsTime uint32 + JournalBlocks [17]uint32 + BlocksCountHigh uint32 + RBlocksCountHigh uint32 + FreeBlocksCountHigh uint32 + MinExtraIsize uint16 + WantExtraIsize uint16 + Flags uint32 + RaidStride uint16 + MmpInterval uint16 + MmpBlock uint64 + RaidStripeWidth uint32 + LogGroupsPerFlex uint8 + ChecksumType uint8 + ReservedPad uint16 + KbytesWritten uint64 + SnapshotInum uint32 + SnapshotID uint32 + SnapshotRBlocksCount uint64 + SnapshotList uint32 + ErrorCount uint32 + FirstErrorTime uint32 + FirstErrorInode uint32 + FirstErrorBlock uint64 + FirstErrorFunc [32]uint8 + FirstErrorLine uint32 + LastErrorTime uint32 + LastErrorInode uint32 + LastErrorLine uint32 + LastErrorBlock uint64 + LastErrorFunc [32]uint8 + MountOpts [64]uint8 + UserQuotaInum uint32 + GroupQuotaInum uint32 + OverheadBlocks uint32 + BackupBgs [2]uint32 + EncryptAlgos [4]uint8 + EncryptPwSalt [16]uint8 + LpfInode uint32 + ProjectQuotaInum uint32 + ChecksumSeed uint32 + WtimeHigh uint8 + MtimeHigh uint8 + MkfsTimeHigh uint8 + LastcheckHigh uint8 + FirstErrorTimeHigh uint8 + LastErrorTimeHigh uint8 + Pad [2]uint8 + Reserved [96]uint32 + Checksum uint32 +} + +const SuperBlockMagic uint16 = 0xef53 + +type CompatFeature uint32 +type IncompatFeature uint32 +type RoCompatFeature uint32 + +const ( + CompatDirPrealloc CompatFeature = 0x1 + CompatImagicInodes CompatFeature = 0x2 + CompatHasJournal CompatFeature = 0x4 + CompatExtAttr CompatFeature = 0x8 + CompatResizeInode CompatFeature = 0x10 + CompatDirIndex CompatFeature = 0x20 + CompatLazyBg CompatFeature = 0x40 + CompatExcludeInode CompatFeature = 0x80 + CompatExcludeBitmap CompatFeature = 0x100 + CompatSparseSuper2 CompatFeature = 0x200 + + IncompatCompression IncompatFeature = 0x1 + IncompatFiletype 
IncompatFeature = 0x2 + IncompatRecover IncompatFeature = 0x4 + IncompatJournalDev IncompatFeature = 0x8 + IncompatMetaBg IncompatFeature = 0x10 + IncompatExtents IncompatFeature = 0x40 + Incompat_64Bit IncompatFeature = 0x80 + IncompatMmp IncompatFeature = 0x100 + IncompatFlexBg IncompatFeature = 0x200 + IncompatEaInode IncompatFeature = 0x400 + IncompatDirdata IncompatFeature = 0x1000 + IncompatCsumSeed IncompatFeature = 0x2000 + IncompatLargedir IncompatFeature = 0x4000 + IncompatInlineData IncompatFeature = 0x8000 + IncompatEncrypt IncompatFeature = 0x10000 + + RoCompatSparseSuper RoCompatFeature = 0x1 + RoCompatLargeFile RoCompatFeature = 0x2 + RoCompatBtreeDir RoCompatFeature = 0x4 + RoCompatHugeFile RoCompatFeature = 0x8 + RoCompatGdtCsum RoCompatFeature = 0x10 + RoCompatDirNlink RoCompatFeature = 0x20 + RoCompatExtraIsize RoCompatFeature = 0x40 + RoCompatHasSnapshot RoCompatFeature = 0x80 + RoCompatQuota RoCompatFeature = 0x100 + RoCompatBigalloc RoCompatFeature = 0x200 + RoCompatMetadataCsum RoCompatFeature = 0x400 + RoCompatReplica RoCompatFeature = 0x800 + RoCompatReadonly RoCompatFeature = 0x1000 + RoCompatProject RoCompatFeature = 0x2000 +) + +type BlockGroupFlag uint16 + +const ( + BlockGroupInodeUninit BlockGroupFlag = 0x1 + BlockGroupBlockUninit BlockGroupFlag = 0x2 + BlockGroupInodeZeroed BlockGroupFlag = 0x4 +) + +type GroupDescriptor struct { + BlockBitmapLow uint32 + InodeBitmapLow uint32 + InodeTableLow uint32 + FreeBlocksCountLow uint16 + FreeInodesCountLow uint16 + UsedDirsCountLow uint16 + Flags BlockGroupFlag + ExcludeBitmapLow uint32 + BlockBitmapCsumLow uint16 + InodeBitmapCsumLow uint16 + ItableUnusedLow uint16 + Checksum uint16 +} + +type GroupDescriptor64 struct { + GroupDescriptor + BlockBitmapHigh uint32 + InodeBitmapHigh uint32 + InodeTableHigh uint32 + FreeBlocksCountHigh uint16 + FreeInodesCountHigh uint16 + UsedDirsCountHigh uint16 + ItableUnusedHigh uint16 + ExcludeBitmapHigh uint32 + BlockBitmapCsumHigh uint16 + 
InodeBitmapCsumHigh uint16 + Reserved uint32 +} + +const ( + S_IXOTH = 0x1 + S_IWOTH = 0x2 + S_IROTH = 0x4 + S_IXGRP = 0x8 + S_IWGRP = 0x10 + S_IRGRP = 0x20 + S_IXUSR = 0x40 + S_IWUSR = 0x80 + S_IRUSR = 0x100 + S_ISVTX = 0x200 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xA000 + S_IFSOCK = 0xC000 + + TypeMask uint16 = 0xF000 +) + +type InodeNumber uint32 + +const ( + InodeRoot = 2 +) + +type Inode struct { + Mode uint16 + Uid uint16 + SizeLow uint32 + Atime uint32 + Ctime uint32 + Mtime uint32 + Dtime uint32 + Gid uint16 + LinksCount uint16 + BlocksLow uint32 + Flags InodeFlag + Version uint32 + Block [60]byte + Generation uint32 + XattrBlockLow uint32 + SizeHigh uint32 + ObsoleteFragmentAddr uint32 + BlocksHigh uint16 + XattrBlockHigh uint16 + UidHigh uint16 + GidHigh uint16 + ChecksumLow uint16 + Reserved uint16 + ExtraIsize uint16 + ChecksumHigh uint16 + CtimeExtra uint32 + MtimeExtra uint32 + AtimeExtra uint32 + Crtime uint32 + CrtimeExtra uint32 + VersionHigh uint32 + Projid uint32 +} + +type InodeFlag uint32 + +const ( + InodeFlagSecRm InodeFlag = 0x1 + InodeFlagUnRm InodeFlag = 0x2 + InodeFlagCompressed InodeFlag = 0x4 + InodeFlagSync InodeFlag = 0x8 + InodeFlagImmutable InodeFlag = 0x10 + InodeFlagAppend InodeFlag = 0x20 + InodeFlagNoDump InodeFlag = 0x40 + InodeFlagNoAtime InodeFlag = 0x80 + InodeFlagDirtyCompressed InodeFlag = 0x100 + InodeFlagCompressedClusters InodeFlag = 0x200 + InodeFlagNoCompress InodeFlag = 0x400 + InodeFlagEncrypted InodeFlag = 0x800 + InodeFlagHashedIndex InodeFlag = 0x1000 + InodeFlagMagic InodeFlag = 0x2000 + InodeFlagJournalData InodeFlag = 0x4000 + InodeFlagNoTail InodeFlag = 0x8000 + InodeFlagDirSync InodeFlag = 0x10000 + InodeFlagTopDir InodeFlag = 0x20000 + InodeFlagHugeFile InodeFlag = 0x40000 + InodeFlagExtents InodeFlag = 0x80000 + InodeFlagEaInode InodeFlag = 0x200000 + InodeFlagEOFBlocks InodeFlag = 0x400000 + 
InodeFlagSnapfile InodeFlag = 0x01000000 + InodeFlagSnapfileDeleted InodeFlag = 0x04000000 + InodeFlagSnapfileShrunk InodeFlag = 0x08000000 + InodeFlagInlineData InodeFlag = 0x10000000 + InodeFlagProjectIDInherit InodeFlag = 0x20000000 + InodeFlagReserved InodeFlag = 0x80000000 +) + +const ( + MaxLinks = 65000 +) + +type ExtentHeader struct { + Magic uint16 + Entries uint16 + Max uint16 + Depth uint16 + Generation uint32 +} + +const ExtentHeaderMagic uint16 = 0xf30a + +type ExtentIndexNode struct { + Block uint32 + LeafLow uint32 + LeafHigh uint16 + Unused uint16 +} + +type ExtentLeafNode struct { + Block uint32 + Length uint16 + StartHigh uint16 + StartLow uint32 +} + +type ExtentTail struct { + Checksum uint32 +} + +type DirectoryEntry struct { + Inode InodeNumber + RecordLength uint16 + NameLength uint8 + FileType FileType + //Name []byte +} + +type FileType uint8 + +const ( + FileTypeUnknown FileType = 0x0 + FileTypeRegular FileType = 0x1 + FileTypeDirectory FileType = 0x2 + FileTypeCharacter FileType = 0x3 + FileTypeBlock FileType = 0x4 + FileTypeFIFO FileType = 0x5 + FileTypeSocket FileType = 0x6 + FileTypeSymbolicLink FileType = 0x7 +) + +type DirectoryEntryTail struct { + ReservedZero1 uint32 + RecordLength uint16 + ReservedZero2 uint8 + FileType uint8 + Checksum uint32 +} + +type DirectoryTreeRoot struct { + Dot DirectoryEntry + DotName [4]byte + DotDot DirectoryEntry + DotDotName [4]byte + ReservedZero uint32 + HashVersion uint8 + InfoLength uint8 + IndirectLevels uint8 + UnusedFlags uint8 + Limit uint16 + Count uint16 + Block uint32 + //Entries []DirectoryTreeEntry +} + +type DirectoryTreeNode struct { + FakeInode uint32 + FakeRecordLength uint16 + NameLength uint8 + FileType uint8 + Limit uint16 + Count uint16 + Block uint32 + //Entries []DirectoryTreeEntry +} + +type DirectoryTreeEntry struct { + Hash uint32 + Block uint32 +} + +type DirectoryTreeTail struct { + Reserved uint32 + Checksum uint32 +} + +type XAttrInodeBodyHeader struct { + Magic uint32 
+} + +type XAttrHeader struct { + Magic uint32 + ReferenceCount uint32 + Blocks uint32 + Hash uint32 + Checksum uint32 + Reserved [3]uint32 +} + +const XAttrHeaderMagic uint32 = 0xea020000 + +type XAttrEntry struct { + NameLength uint8 + NameIndex uint8 + ValueOffset uint16 + ValueInum uint32 + ValueSize uint32 + Hash uint32 + //Name []byte +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go new file mode 100644 index 0000000000..5fcc3ba78a --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go @@ -0,0 +1,270 @@ +package tar2ext4 + +import ( + "archive/tar" + "bufio" + "bytes" + "encoding/binary" + "github.com/pkg/errors" + "io" + "io/ioutil" + "os" + "path" + "strings" + "unsafe" + + "github.com/Microsoft/hcsshim/ext4/dmverity" + "github.com/Microsoft/hcsshim/ext4/internal/compactext4" + "github.com/Microsoft/hcsshim/ext4/internal/format" +) + +type params struct { + convertWhiteout bool + appendVhdFooter bool + appendDMVerity bool + ext4opts []compactext4.Option +} + +// Option is the type for optional parameters to Convert. +type Option func(*params) + +// ConvertWhiteout instructs the converter to convert OCI-style whiteouts +// (beginning with .wh.) to overlay-style whiteouts. +func ConvertWhiteout(p *params) { + p.convertWhiteout = true +} + +// AppendVhdFooter instructs the converter to add a fixed VHD footer to the +// file. +func AppendVhdFooter(p *params) { + p.appendVhdFooter = true +} + +// AppendDMVerity instructs the converter to add a dmverity merkle tree for +// the ext4 filesystem after the filesystem and before the optional VHD footer +func AppendDMVerity(p *params) { + p.appendDMVerity = true +} + +// InlineData instructs the converter to write small files into the inode +// structures directly. This creates smaller images but currently is not +// compatible with DAX. 
+func InlineData(p *params) { + p.ext4opts = append(p.ext4opts, compactext4.InlineData) +} + +// MaximumDiskSize instructs the writer to limit the disk size to the specified +// value. This also reserves enough metadata space for the specified disk size. +// If not provided, then 16GB is the default. +func MaximumDiskSize(size int64) Option { + return func(p *params) { + p.ext4opts = append(p.ext4opts, compactext4.MaximumDiskSize(size)) + } +} + +const ( + whiteoutPrefix = ".wh." + opaqueWhiteout = ".wh..wh..opq" + ext4blocksize = compactext4.BlockSize +) + +// Convert writes a compact ext4 file system image that contains the files in the +// input tar stream. +func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error { + var p params + for _, opt := range options { + opt(&p) + } + t := tar.NewReader(bufio.NewReader(r)) + fs := compactext4.NewWriter(w, p.ext4opts...) + for { + hdr, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + if p.convertWhiteout { + dir, name := path.Split(hdr.Name) + if strings.HasPrefix(name, whiteoutPrefix) { + if name == opaqueWhiteout { + // Update the directory with the appropriate xattr. + f, err := fs.Stat(dir) + if err != nil { + return err + } + f.Xattrs["trusted.overlay.opaque"] = []byte("y") + err = fs.Create(dir, f) + if err != nil { + return err + } + } else { + // Create an overlay-style whiteout. 
+ f := &compactext4.File{ + Mode: compactext4.S_IFCHR, + Devmajor: 0, + Devminor: 0, + } + err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f) + if err != nil { + return err + } + } + + continue + } + } + + if hdr.Typeflag == tar.TypeLink { + err = fs.Link(hdr.Linkname, hdr.Name) + if err != nil { + return err + } + } else { + f := &compactext4.File{ + Mode: uint16(hdr.Mode), + Atime: hdr.AccessTime, + Mtime: hdr.ModTime, + Ctime: hdr.ChangeTime, + Crtime: hdr.ModTime, + Size: hdr.Size, + Uid: uint32(hdr.Uid), + Gid: uint32(hdr.Gid), + Linkname: hdr.Linkname, + Devmajor: uint32(hdr.Devmajor), + Devminor: uint32(hdr.Devminor), + Xattrs: make(map[string][]byte), + } + for key, value := range hdr.PAXRecords { + const xattrPrefix = "SCHILY.xattr." + if strings.HasPrefix(key, xattrPrefix) { + f.Xattrs[key[len(xattrPrefix):]] = []byte(value) + } + } + + var typ uint16 + switch hdr.Typeflag { + case tar.TypeReg, tar.TypeRegA: + typ = compactext4.S_IFREG + case tar.TypeSymlink: + typ = compactext4.S_IFLNK + case tar.TypeChar: + typ = compactext4.S_IFCHR + case tar.TypeBlock: + typ = compactext4.S_IFBLK + case tar.TypeDir: + typ = compactext4.S_IFDIR + case tar.TypeFifo: + typ = compactext4.S_IFIFO + } + f.Mode &= ^compactext4.TypeMask + f.Mode |= typ + err = fs.CreateWithParents(hdr.Name, f) + if err != nil { + return err + } + _, err = io.Copy(fs, t) + if err != nil { + return err + } + } + } + err := fs.Close() + if err != nil { + return err + } + + if p.appendDMVerity { + ext4size, err := w.Seek(0, io.SeekEnd) + if err != nil { + return err + } + + // Rewind the stream and then read it all into a []byte for + // dmverity processing + _, err = w.Seek(0, io.SeekStart) + if err != nil { + return err + } + data, err := ioutil.ReadAll(w) + if err != nil { + return err + } + + mtree, err := dmverity.MerkleTree(data) + if err != nil { + return errors.Wrap(err, "failed to build merkle tree") + } + + // Write dmverity superblock and then the merkle tree after the end 
of the + // ext4 filesystem + _, err = w.Seek(0, io.SeekEnd) + if err != nil { + return err + } + superblock := dmverity.NewDMVeritySuperblock(uint64(ext4size)) + err = binary.Write(w, binary.LittleEndian, superblock) + if err != nil { + return err + } + // pad the superblock + sbsize := int(unsafe.Sizeof(*superblock)) + padding := bytes.Repeat([]byte{0}, ext4blocksize-(sbsize%ext4blocksize)) + _, err = w.Write(padding) + if err != nil { + return err + } + // write the tree + _, err = w.Write(mtree) + if err != nil { + return err + } + } + + if p.appendVhdFooter { + size, err := w.Seek(0, io.SeekEnd) + if err != nil { + return err + } + err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size)) + if err != nil { + return err + } + } + return nil +} + +// ReadExt4SuperBlock reads and returns ext4 super block from VHD +// +// The layout on disk is as follows: +// | Group 0 padding | - 1024 bytes +// | ext4 SuperBlock | - 1 block +// | Group Descriptors | - many blocks +// | Reserved GDT Blocks | - many blocks +// | Data Block Bitmap | - 1 block +// | inode Bitmap | - 1 block +// | inode Table | - many blocks +// | Data Blocks | - many blocks +// +// More details can be found here https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout +// +// Our goal is to skip the Group 0 padding, read and return the ext4 SuperBlock +func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) { + vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer vhd.Close() + + // Skip padding at the start + if _, err := vhd.Seek(1024, io.SeekStart); err != nil { + return nil, err + } + var sb format.SuperBlock + if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil { + return nil, err + } + return &sb, nil +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go b/test/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go new file mode 100644 index 0000000000..99f6e3a304 --- /dev/null +++ 
b/test/vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go @@ -0,0 +1,76 @@ +package tar2ext4 + +import ( + "bytes" + "crypto/rand" + "encoding/binary" +) + +// Constants for the VHD footer +const ( + cookieMagic = "conectix" + featureMask = 0x2 + fileFormatVersionMagic = 0x00010000 + fixedDataOffset = -1 + creatorVersionMagic = 0x000a0000 + diskTypeFixed = 2 +) + +type vhdFooter struct { + Cookie [8]byte + Features uint32 + FileFormatVersion uint32 + DataOffset int64 + TimeStamp uint32 + CreatorApplication [4]byte + CreatorVersion uint32 + CreatorHostOS [4]byte + OriginalSize int64 + CurrentSize int64 + DiskGeometry uint32 + DiskType uint32 + Checksum uint32 + UniqueID [16]uint8 + SavedState uint8 + Reserved [427]uint8 +} + +func makeFixedVHDFooter(size int64) *vhdFooter { + footer := &vhdFooter{ + Features: featureMask, + FileFormatVersion: fileFormatVersionMagic, + DataOffset: fixedDataOffset, + CreatorVersion: creatorVersionMagic, + OriginalSize: size, + CurrentSize: size, + DiskType: diskTypeFixed, + UniqueID: generateUUID(), + } + copy(footer.Cookie[:], cookieMagic) + footer.Checksum = calculateCheckSum(footer) + return footer +} + +func calculateCheckSum(footer *vhdFooter) uint32 { + oldchk := footer.Checksum + footer.Checksum = 0 + + buf := &bytes.Buffer{} + _ = binary.Write(buf, binary.BigEndian, footer) + + var chk uint32 + bufBytes := buf.Bytes() + for i := 0; i < len(bufBytes); i++ { + chk += uint32(bufBytes[i]) + } + footer.Checksum = oldchk + return uint32(^chk) +} + +func generateUUID() [16]byte { + res := [16]byte{} + if _, err := rand.Read(res[:]); err != nil { + panic(err) + } + return res +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/go.mod b/test/vendor/github.com/Microsoft/hcsshim/go.mod index fe1e7958ad..48db15a4ad 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/go.mod +++ b/test/vendor/github.com/Microsoft/hcsshim/go.mod @@ -11,6 +11,7 @@ require ( github.com/containerd/ttrpc v1.0.2 github.com/containerd/typeurl 
v1.0.2 github.com/gogo/protobuf v1.3.2 + github.com/google/go-containerregistry v0.5.1 github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 github.com/mattn/go-shellwords v1.0.6 github.com/opencontainers/runc v1.0.0-rc93 diff --git a/test/vendor/github.com/Microsoft/hcsshim/go.sum b/test/vendor/github.com/Microsoft/hcsshim/go.sum index 56925418aa..1cdb730911 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/go.sum +++ b/test/vendor/github.com/Microsoft/hcsshim/go.sum @@ -9,6 +9,7 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -55,7 +56,9 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc 
v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -157,6 +160,8 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -201,6 +206,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= 
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= @@ -214,9 +220,17 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 h1:Cvj7S8I4Xpx78KAl6TwTmMHuHlZ/0SM60NUneGJQ7IE= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics 
v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= @@ -253,11 +267,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -289,6 +307,7 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock 
v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -315,6 +334,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -323,6 +344,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid 
v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -334,6 +356,7 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -360,6 +383,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -386,9 +410,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -401,6 +428,7 @@ github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3Zk github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -416,26 +444,33 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -444,6 +479,7 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -507,6 +543,7 @@ github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -648,6 +685,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -659,7 +697,9 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -677,6 +717,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= @@ -698,6 +739,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -724,6 +766,7 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -752,9 +795,11 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -768,6 +813,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -786,7 +832,11 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -804,10 +854,12 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine 
v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -829,6 +881,8 @@ gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -873,6 +927,7 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= 
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -881,8 +936,12 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -891,6 +950,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 
v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go index bfe83eab44..5ac526102d 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go @@ -45,10 +45,18 @@ type LCOWMappedDirectory struct { ReadOnly bool `json:"ReadOnly,omitempty"` } +// LCOWMappedLayer is one of potentially multiple read-only layers mapped on a VPMem device +type LCOWMappedLayer struct { + DeviceOffsetInBytes uint64 `json:"DeviceOffsetInBytes,omitempty"` + DeviceSizeInBytes uint64 `json:"DeviceSizeInBytes,omitempty"` +} + // Read-only layers over VPMem type LCOWMappedVPMemDevice struct { DeviceNumber uint32 `json:"DeviceNumber,omitempty"` MountPath string `json:"MountPath,omitempty"` + // Mapping is ignored when MountPath is not empty + MappingInfo *LCOWMappedLayer `json:"MappingInfo,omitempty"` } type LCOWMappedVPCIDevice struct { diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 0000000000..9ef322f615 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type 
VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go index b5ab18a7ca..35ae9a8a39 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/layers/layers.go @@ -212,14 +212,14 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) if !uvm.DevicesPhysicallyBacked() { // We first try vPMEM and if it is full or the file is too large we // fall back to SCSI. - uvmPath, err = uvm.AddVPMEM(ctx, layerPath) + uvmPath, err = uvm.AddVPMem(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, "layerType": "vpmem", }).Debug("Added LCOW layer") return uvmPath, nil - } else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMEMLayerSize { + } else if err != uvmpkg.ErrNoAvailableLocation && err != uvmpkg.ErrMaxVPMemLayerSize { return "", fmt.Errorf("failed to add VPMEM layer: %s", err) } } @@ -239,7 +239,7 @@ func addLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) func removeLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath string) error { // Assume it was added to vPMEM and fall back to SCSI - err := uvm.RemoveVPMEM(ctx, layerPath) + err := uvm.RemoveVPMem(ctx, layerPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go new file mode 100644 index 0000000000..6381dfd887 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go @@ -0,0 +1,316 @@ +package memory + +import ( + "github.com/pkg/errors" +) + +const ( + minimumClassSize = MegaByte + maximumClassSize = 4 * 
GigaByte + memoryClassNumber = 7 +) + +var ( + ErrInvalidMemoryClass = errors.New("invalid memory class") + ErrEarlyMerge = errors.New("not all children have been freed") + ErrEmptyPoolOperation = errors.New("operation on empty pool") +) + +// GetMemoryClassType returns the minimum memory class type that can hold a device of +// a given size. The smallest class is 1MB and the largest one is 4GB with 2 bit offset +// intervals in between, for a total of 7 different classes. This function does not +// do a validity check +func GetMemoryClassType(s uint64) classType { + s = (s - 1) >> 20 + memCls := uint32(0) + for s > 0 { + s = s >> 2 + memCls++ + } + return classType(memCls) +} + +// GetMemoryClassSize returns size in bytes for a given memory class +func GetMemoryClassSize(memCls classType) (uint64, error) { + if memCls >= memoryClassNumber { + return 0, ErrInvalidMemoryClass + } + return minimumClassSize << (2 * memCls), nil +} + +// region represents a contiguous memory block +type region struct { + // parent region that has been split into 4 + parent *region + class classType + // offset represents offset in bytes + offset uint64 +} + +// memoryPool tracks free and busy (used) memory regions +type memoryPool struct { + free map[uint64]*region + busy map[uint64]*region +} + +// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c +// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory +// space. +// +// There are a total of 7 different region sizes that can be allocated, with the smallest +// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device). +// +// For efficiency and to reduce fragmentation an entire region is allocated when requested. 
+// When there's no available region of requested size, we try to allocate more memory for +// this particular size by splitting the next available larger region into smaller ones, e.g. +// if there's no region available for size class 0, we try splitting a region from class 1, +// then class 2 etc, until we are able to do so or hit the upper limit. +type PoolAllocator struct { + pools [memoryClassNumber]*memoryPool +} + +var _ MappedRegion = ®ion{} +var _ Allocator = &PoolAllocator{} + +func (r *region) Offset() uint64 { + return r.offset +} + +func (r *region) Size() uint64 { + sz, err := GetMemoryClassSize(r.class) + if err != nil { + panic(err) + } + return sz +} + +func (r *region) Type() classType { + return r.class +} + +func newEmptyMemoryPool() *memoryPool { + return &memoryPool{ + free: make(map[uint64]*region), + busy: make(map[uint64]*region), + } +} + +func NewPoolMemoryAllocator() PoolAllocator { + pa := PoolAllocator{} + p := newEmptyMemoryPool() + // by default we allocate a single region with maximum possible size (class type) + p.free[0] = ®ion{ + class: memoryClassNumber - 1, + offset: 0, + } + pa.pools[memoryClassNumber-1] = p + return pa +} + +// Allocate checks memory region pool for the given `size` and returns a free region with +// minimal offset, if none available tries expanding matched memory pool. 
+// +// Internally it's done via moving a region from free pool into a busy pool +func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { + memCls := GetMemoryClassType(size) + if memCls >= memoryClassNumber { + return nil, ErrInvalidMemoryClass + } + + // find region with the smallest offset + nextCls, nextOffset, err := pa.findNextOffset(memCls) + if err != nil { + return nil, err + } + + // this means that there are no more regions for the current class, try expanding + if nextCls != memCls { + if err := pa.split(memCls); err != nil { + if err == ErrInvalidMemoryClass { + return nil, ErrNotEnoughSpace + } + return nil, err + } + } + + if err := pa.markBusy(memCls, nextOffset); err != nil { + return nil, err + } + + // by this point memory pool for memCls should have been created, + // either prior or during split call + if r := pa.pools[memCls].busy[nextOffset]; r != nil { + return r, nil + } + + return nil, ErrNotEnoughSpace +} + +// Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into +// a bigger one +func (pa *PoolAllocator) Release(reg MappedRegion) error { + mp := pa.pools[reg.Type()] + if mp == nil { + return ErrEmptyPoolOperation + } + + err := pa.markFree(reg.Type(), reg.Offset()) + if err != nil { + return err + } + + n := mp.free[reg.Offset()] + if n == nil { + return ErrNotAllocated + } + if err := pa.merge(n.parent); err != nil { + if err != ErrEarlyMerge { + return err + } + } + return nil +} + +// findNextOffset finds next region location for a given memCls +func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) { + for mc := memCls; mc < memoryClassNumber; mc++ { + pi := pa.pools[mc] + if pi == nil || len(pi.free) == 0 { + continue + } + + target := maximumClassSize + for offset := range pi.free { + if offset < target { + target = offset + } + } + return mc, target, nil + } + return 0, 0, ErrNotEnoughSpace +} + +// split tries to 
recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit +func (pa *PoolAllocator) split(clsType classType) error { + nextClsType := clsType + 1 + if nextClsType >= memoryClassNumber { + return ErrInvalidMemoryClass + } + + nextPool := pa.pools[nextClsType] + if nextPool == nil { + nextPool = newEmptyMemoryPool() + pa.pools[nextClsType] = nextPool + } + + cls, offset, err := pa.findNextOffset(nextClsType) + if err != nil { + return err + } + // not enough memory in the next class, try to recursively expand + if cls != nextClsType { + if err := pa.split(nextClsType); err != nil { + return err + } + } + + if err := pa.markBusy(nextClsType, offset); err != nil { + return err + } + + // memCls validity has been checked already, we can ignore the error + clsSize, _ := GetMemoryClassSize(clsType) + + nextReg := nextPool.busy[offset] + if nextReg == nil { + return ErrNotAllocated + } + + // expand memCls + cp := pa.pools[clsType] + if cp == nil { + cp = newEmptyMemoryPool() + pa.pools[clsType] = cp + } + // create 4 smaller regions + for i := uint64(0); i < 4; i++ { + offset := nextReg.offset + i*clsSize + reg := ®ion{ + parent: nextReg, + class: clsType, + offset: offset, + } + cp.free[offset] = reg + } + return nil +} + +func (pa *PoolAllocator) merge(parent *region) error { + // nothing to merge + if parent == nil { + return nil + } + + childCls := parent.class - 1 + childPool := pa.pools[childCls] + // no child nodes to merge, try to merge parent + if childPool == nil { + return pa.merge(parent.parent) + } + + childSize, err := GetMemoryClassSize(childCls) + if err != nil { + return err + } + + // check if all the child nodes are free + var children []*region + for i := uint64(0); i < 4; i++ { + child, free := childPool.free[parent.offset+i*childSize] + if !free { + return ErrEarlyMerge + } + children = append(children, child) + } + + // at this point all the child nodes will be free and we can merge + for _, child := range 
children { + delete(childPool.free, child.offset) + } + + if err := pa.markFree(parent.class, parent.offset); err != nil { + return err + } + + return pa.merge(parent.parent) +} + +// markFree internally moves a region with `offset` from busy to free map +func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.busy[offset]; exists { + clsPool.free[offset] = reg + delete(clsPool.busy, offset) + return nil + } + return ErrNotAllocated +} + +// markBusy internally moves a region with `offset` from free to busy map +func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.free[offset]; exists { + clsPool.busy[offset] = reg + delete(clsPool.free, offset) + return nil + } + return ErrNotAllocated +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go new file mode 100644 index 0000000000..7cf4368a95 --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go @@ -0,0 +1,28 @@ +package memory + +import "github.com/pkg/errors" + +type classType uint32 + +const ( + MegaByte = uint64(1024 * 1024) + GigaByte = 1024 * MegaByte +) + +var ( + ErrNotEnoughSpace = errors.New("not enough space") + ErrNotAllocated = errors.New("no memory allocated at the given offset") +) + +// MappedRegion represents a memory block with an offset +type MappedRegion interface { + Offset() uint64 + Size() uint64 + Type() classType +} + +// Allocator is an interface for memory allocation +type Allocator interface { + Allocate(uint64) (MappedRegion, error) + Release(MappedRegion) error +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go 
b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go index db3f429253..e821837ed0 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/oci/uvm.go @@ -134,6 +134,7 @@ const ( annotationBootFilesRootPath = "io.microsoft.virtualmachine.lcow.bootfilesrootpath" annotationKernelDirectBoot = "io.microsoft.virtualmachine.lcow.kerneldirectboot" annotationVPCIEnabled = "io.microsoft.virtualmachine.lcow.vpcienabled" + annotationVPMemNoMultiMapping = "io.microsoft.virtualmachine.lcow.vpmem.nomultimapping" annotationStorageQoSBandwidthMaximum = "io.microsoft.virtualmachine.storageqos.bandwidthmaximum" annotationStorageQoSIopsMaximum = "io.microsoft.virtualmachine.storageqos.iopsmaximum" annotationFullyPhysicallyBacked = "io.microsoft.virtualmachine.fullyphysicallybacked" @@ -456,6 +457,7 @@ func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) ( lopts.ProcessorWeight = ParseAnnotationsCPUWeight(ctx, s, annotationProcessorWeight, lopts.ProcessorWeight) lopts.VPMemDeviceCount = parseAnnotationsUint32(ctx, s.Annotations, annotationVPMemCount, lopts.VPMemDeviceCount) lopts.VPMemSizeBytes = parseAnnotationsUint64(ctx, s.Annotations, annotationVPMemSize, lopts.VPMemSizeBytes) + lopts.VPMemNoMultiMapping = parseAnnotationsBool(ctx, s.Annotations, annotationVPMemNoMultiMapping, lopts.VPMemNoMultiMapping) lopts.StorageQoSBandwidthMaximum = ParseAnnotationsStorageBps(ctx, s, annotationStorageQoSBandwidthMaximum, lopts.StorageQoSBandwidthMaximum) lopts.StorageQoSIopsMaximum = ParseAnnotationsStorageIops(ctx, s, annotationStorageQoSIopsMaximum, lopts.StorageQoSIopsMaximum) lopts.VPCIEnabled = parseAnnotationsBool(ctx, s.Annotations, annotationVPCIEnabled, lopts.VPCIEnabled) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go index 92c7e8d478..de18ca1c60 100644 --- 
a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go @@ -14,6 +14,10 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" + "github.com/Microsoft/hcsshim/internal/gcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" @@ -22,9 +26,6 @@ import ( "github.com/Microsoft/hcsshim/internal/processorinfo" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) type PreferredRootFSType int @@ -70,6 +71,7 @@ type OptionsLCOW struct { OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken. VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. + VPMemNoMultiMapping bool // Disables LCOW layer multi mapping PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD` EnableColdDiscardHint bool // Whether the HCS should use cold discard hints. 
Defaults to false VPCIEnabled bool // Whether the kernel should enable pci @@ -114,6 +116,7 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { OutputHandler: parseLogrus(id), VPMemDeviceCount: DefaultVPMEMCount, VPMemSizeBytes: DefaultVPMemSizeBytes, + VPMemNoMultiMapping: osversion.Get().Build < osversion.V19H1, PreferredRootFSType: PreferredRootFSTypeInitRd, EnableColdDiscardHint: false, VPCIEnabled: false, @@ -170,6 +173,7 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error physicallyBacked: !opts.AllowOvercommit, devicesPhysicallyBacked: opts.FullyPhysicallyBacked, createOpts: opts, + vpmemMultiMapping: !opts.VPMemNoMultiMapping, } defer func() { @@ -291,11 +295,35 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error ImageFormat: imageFormat, }, } - // Add to our internal structure - uvm.vpmemDevices[0] = &vpmemInfo{ - hostPath: opts.RootFSFile, - uvmPath: "/", - refCount: 1, + if uvm.vpmemMultiMapping { + pmem := newPackedVPMemDevice() + pmem.maxMappedDeviceCount = 1 + + st, err := os.Stat(rootfsFullPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) + } + devSize := pageAlign(uint64(st.Size())) + memReg, err := pmem.Allocate(devSize) + if err != nil { + return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + } + defer func() { + if err != nil { + if err = pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debug("failed to release memory region") + } + } + }() + + dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) + if err := pmem.mapVHDLayer(ctx, dev); err != nil { + return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + } + uvm.vpmemDevicesMultiMapped[0] = pmem + } else { + dev := newDefaultVPMemInfo(opts.RootFSFile, "/") + uvm.vpmemDevicesDefault[0] = dev } } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go 
b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go index 24b632865c..1e568db98b 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go @@ -7,12 +7,13 @@ import ( "sync" "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" + "github.com/Microsoft/hcsshim/internal/gcs" "github.com/Microsoft/hcsshim/internal/hcs" "github.com/Microsoft/hcsshim/internal/hcs/schema1" "github.com/Microsoft/hcsshim/internal/hns" "github.com/Microsoft/hcsshim/internal/ncproxyttrpc" - "golang.org/x/sys/windows" ) // | WCOW | LCOW @@ -21,14 +22,6 @@ import ( // Read-Only Layer | VSMB | VPMEM // Mapped Directory | VSMB | PLAN9 -// vpmemInfo is an internal structure used for determining VPMem devices mapped to -// a Linux utility VM. -type vpmemInfo struct { - hostPath string - uvmPath string - refCount uint32 -} - type nicInfo struct { ID string Endpoint *hns.HNSEndpoint @@ -81,9 +74,11 @@ type UtilityVM struct { // VPMEM devices that are mapped into a Linux UVM. These are used for read-only layers, or for // booting from VHD. - vpmemDevices [MaxVPMEMCount]*vpmemInfo // Limited by ACPI size. - vpmemMaxCount uint32 // The max number of VPMem devices. - vpmemMaxSizeBytes uint64 // The max size of the layer in bytes per vPMem device. + vpmemMaxCount uint32 // The max number of VPMem devices. + vpmemMaxSizeBytes uint64 // The max size of the layer in bytes per vPMem device. + vpmemMultiMapping bool // Enable mapping multiple VHDs onto a single VPMem device + vpmemDevicesDefault [MaxVPMEMCount]*vPMemInfoDefault + vpmemDevicesMultiMapped [MaxVPMEMCount]*vPMemInfoMulti // SCSI devices that are mapped into a Windows or Linux utility VM scsiLocations [4][64]*SCSIMount // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though. 
diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go index 6d883220eb..061ea489b9 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go @@ -2,166 +2,190 @@ package uvm import ( "context" - "errors" "fmt" "os" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/guestrequest" "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/requesttype" - "github.com/sirupsen/logrus" ) const ( - lcowVPMEMLayerFmt = "/run/layers/p%d" + lcowDefaultVPMemLayerFmt = "/run/layers/p%d" ) var ( - // ErrMaxVPMEMLayerSize is the error returned when the size of `hostPath` is - // greater than the max vPMEM layer size set at create time. - ErrMaxVPMEMLayerSize = errors.New("layer size is to large for VPMEM max size") + // ErrMaxVPMemLayerSize is the error returned when the size of `hostPath` is + // greater than the max vPMem layer size set at create time. + ErrMaxVPMemLayerSize = errors.New("layer size is to large for VPMEM max size") ) -// findNextVPMEM finds the next available VPMem slot. +type vPMemInfoDefault struct { + hostPath string + uvmPath string + refCount uint32 +} + +func newDefaultVPMemInfo(hostPath, uvmPath string) *vPMemInfoDefault { + return &vPMemInfoDefault{ + hostPath: hostPath, + uvmPath: uvmPath, + refCount: 1, + } +} + +// findNextVPMemSlot finds next available VPMem slot. // -// The lock MUST be held when calling this function. -func (uvm *UtilityVM) findNextVPMEM(ctx context.Context, hostPath string) (uint32, error) { +// Lock MUST be held when calling this function. 
+func (uvm *UtilityVM) findNextVPMemSlot(ctx context.Context, hostPath string) (uint32, error) { for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - if uvm.vpmemDevices[i] == nil { + if uvm.vpmemDevicesDefault[i] == nil { log.G(ctx).WithFields(logrus.Fields{ "hostPath": hostPath, "deviceNumber": i, - }).Debug("allocated VPMEM location") + }).Debug("allocated VPMem location") return i, nil } } return 0, ErrNoAvailableLocation } -// Lock must be held when calling this function -func (uvm *UtilityVM) findVPMEMDevice(ctx context.Context, findThisHostPath string) (uint32, error) { +// findVPMemSlot looks up `findThisHostPath` in already mounted VPMem devices +// +// Lock MUST be held when calling this function +func (uvm *UtilityVM) findVPMemSlot(ctx context.Context, findThisHostPath string) (uint32, error) { for i := uint32(0); i < uvm.vpmemMaxCount; i++ { - if vi := uvm.vpmemDevices[i]; vi != nil && vi.hostPath == findThisHostPath { + if vi := uvm.vpmemDevicesDefault[i]; vi != nil && vi.hostPath == findThisHostPath { log.G(ctx).WithFields(logrus.Fields{ "hostPath": vi.hostPath, "uvmPath": vi.uvmPath, "refCount": vi.refCount, "deviceNumber": i, - }).Debug("found VPMEM location") + }).Debug("found VPMem location") return i, nil } } return 0, ErrNotAttached } -// AddVPMEM adds a VPMEM disk to a utility VM at the next available location and +// addVPMemDefault adds a VPMem disk to a utility VM at the next available location and // returns the UVM path where the layer was mounted. 
-func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string) (_ string, err error) { - if uvm.operatingSystem != "linux" { - return "", errNotSupported +func (uvm *UtilityVM) addVPMemDefault(ctx context.Context, hostPath string) (_ string, err error) { + if devNumber, err := uvm.findVPMemSlot(ctx, hostPath); err == nil { + device := uvm.vpmemDevicesDefault[devNumber] + device.refCount++ + return device.uvmPath, nil } - uvm.m.Lock() - defer uvm.m.Unlock() + fi, err := os.Stat(hostPath) + if err != nil { + return "", err + } + if uint64(fi.Size()) > uvm.vpmemMaxSizeBytes { + return "", ErrMaxVPMemLayerSize + } - var deviceNumber uint32 - deviceNumber, err = uvm.findVPMEMDevice(ctx, hostPath) + deviceNumber, err := uvm.findNextVPMemSlot(ctx, hostPath) if err != nil { - // We are going to add it so make sure it fits on vPMEM - fi, err := os.Stat(hostPath) - if err != nil { - return "", err - } - if uint64(fi.Size()) > uvm.vpmemMaxSizeBytes { - return "", ErrMaxVPMEMLayerSize - } + return "", err + } + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: hcsschema.VirtualPMemDevice{ + HostPath: hostPath, + ReadOnly: true, + ImageFormat: "Vhd1", + }, + ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), + } + uvmPath := fmt.Sprintf(lcowDefaultVPMemLayerFmt, deviceNumber) + modification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, + RequestType: requesttype.Add, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: uvmPath, + }, + } - // It doesn't exist, so we're going to allocate and hot-add it - deviceNumber, err = uvm.findNextVPMEM(ctx, hostPath) - if err != nil { - return "", err - } + if err := uvm.modify(ctx, modification); err != nil { + return "", errors.Errorf("uvm::addVPMemDefault: failed to modify utility VM configuration: %s", err) + } - modification := &hcsschema.ModifySettingRequest{ - 
RequestType: requesttype.Add, - Settings: hcsschema.VirtualPMemDevice{ - HostPath: hostPath, - ReadOnly: true, - ImageFormat: "Vhd1", - }, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - } + uvm.vpmemDevicesDefault[deviceNumber] = newDefaultVPMemInfo(hostPath, uvmPath) + return uvmPath, nil +} + +// removeVPMemDefault removes a VPMem disk from a Utility VM. If the `hostPath` is not +// attached returns `ErrNotAttached`. +func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) error { + deviceNumber, err := uvm.findVPMemSlot(ctx, hostPath) + if err != nil { + return err + } + + device := uvm.vpmemDevicesDefault[deviceNumber] + if device.refCount > 1 { + device.refCount-- + return nil + } - uvmPath := fmt.Sprintf(lcowVPMEMLayerFmt, deviceNumber) - modification.GuestRequest = guestrequest.GuestRequest{ + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), + GuestRequest: guestrequest.GuestRequest{ ResourceType: guestrequest.ResourceTypeVPMemDevice, - RequestType: requesttype.Add, + RequestType: requesttype.Remove, Settings: guestrequest.LCOWMappedVPMemDevice{ DeviceNumber: deviceNumber, - MountPath: uvmPath, + MountPath: device.uvmPath, }, - } + }, + } + if err := uvm.modify(ctx, modification); err != nil { + return errors.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) + } + log.G(ctx).WithFields(logrus.Fields{ + "hostPath": device.hostPath, + "uvmPath": device.uvmPath, + "refCount": device.refCount, + "deviceNumber": deviceNumber, + }).Debug("removed VPMEM location") - if err := uvm.modify(ctx, modification); err != nil { - return "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err) - } + uvm.vpmemDevicesDefault[deviceNumber] = nil - uvm.vpmemDevices[deviceNumber] = &vpmemInfo{ - hostPath: hostPath, - uvmPath: uvmPath, - 
refCount: 1, - } - return uvmPath, nil - } - device := uvm.vpmemDevices[deviceNumber] - device.refCount++ - return device.uvmPath, nil + return nil } -// RemoveVPMEM removes a VPMEM disk from a Utility VM. If the `hostPath` is not -// attached returns `ErrNotAttached`. -func (uvm *UtilityVM) RemoveVPMEM(ctx context.Context, hostPath string) (err error) { +func (uvm *UtilityVM) AddVPMem(ctx context.Context, hostPath string) (string, error) { if uvm.operatingSystem != "linux" { - return errNotSupported + return "", errNotSupported } uvm.m.Lock() defer uvm.m.Unlock() - deviceNumber, err := uvm.findVPMEMDevice(ctx, hostPath) - if err != nil { - return err + if uvm.vpmemMultiMapping { + return uvm.addVPMemMappedDevice(ctx, hostPath) } + return uvm.addVPMemDefault(ctx, hostPath) +} - device := uvm.vpmemDevices[deviceNumber] - if device.refCount == 1 { - modification := &hcsschema.ModifySettingRequest{ - RequestType: requesttype.Remove, - ResourcePath: fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber), - GuestRequest: guestrequest.GuestRequest{ - ResourceType: guestrequest.ResourceTypeVPMemDevice, - RequestType: requesttype.Remove, - Settings: guestrequest.LCOWMappedVPMemDevice{ - DeviceNumber: deviceNumber, - MountPath: device.uvmPath, - }, - }, - } +func (uvm *UtilityVM) RemoveVPMem(ctx context.Context, hostPath string) error { + if uvm.operatingSystem != "linux" { + return errNotSupported + } - if err := uvm.modify(ctx, modification); err != nil { - return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) - } - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": device.hostPath, - "uvmPath": device.uvmPath, - "refCount": device.refCount, - "deviceNumber": deviceNumber, - }).Debug("removed VPMEM location") - uvm.vpmemDevices[deviceNumber] = nil - } else { - device.refCount-- + uvm.m.Lock() + defer uvm.m.Unlock() + + if uvm.vpmemMultiMapping { + return uvm.removeVPMemMappedDevice(ctx, hostPath) } - return nil + 
return uvm.removeVPMemDefault(ctx, hostPath) } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go new file mode 100644 index 0000000000..3faeff94db --- /dev/null +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem_mapped.go @@ -0,0 +1,307 @@ +package uvm + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/Microsoft/hcsshim/ext4/tar2ext4" + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/memory" + "github.com/Microsoft/hcsshim/internal/requesttype" +) + +const ( + PageSize = 0x1000 + MaxMappedDeviceCount = 1024 +) + +const lcowPackedVPMemLayerFmt = "/run/layers/p%d-%d-%d" + +type mappedDeviceInfo struct { + vPMemInfoDefault + mappedRegion memory.MappedRegion + sizeInBytes uint64 +} + +type vPMemInfoMulti struct { + memory.PoolAllocator + maxSize uint64 + maxMappedDeviceCount uint32 + mappings map[string]*mappedDeviceInfo +} + +func newVPMemMappedDevice(hostPath, uvmPath string, sizeBytes uint64, memReg memory.MappedRegion) *mappedDeviceInfo { + return &mappedDeviceInfo{ + vPMemInfoDefault: vPMemInfoDefault{ + hostPath: hostPath, + uvmPath: uvmPath, + refCount: 1, + }, + mappedRegion: memReg, + sizeInBytes: sizeBytes, + } +} + +func newPackedVPMemDevice() *vPMemInfoMulti { + return &vPMemInfoMulti{ + PoolAllocator: memory.NewPoolMemoryAllocator(), + maxSize: DefaultVPMemSizeBytes, + mappings: make(map[string]*mappedDeviceInfo), + maxMappedDeviceCount: MaxMappedDeviceCount, + } +} + +func pageAlign(t uint64) uint64 { + if t%PageSize == 0 { + return t + } + return (t/PageSize + 1) * PageSize +} + +// fileSystemSize retrieves ext4 fs SuperBlock and calculates the size of the actual file 
system +func fileSystemSize(vhdPath string) (uint64, error) { + sb, err := tar2ext4.ReadExt4SuperBlock(vhdPath) + if err != nil { + return 0, err + } + blockSize := uint64(1024 * (1 << sb.LogBlockSize)) + fsSize := blockSize * uint64(sb.BlocksCountLow) + return pageAlign(fsSize), nil +} + +// newMappedVPMemModifyRequest creates an hcsschema.ModifySettingsRequest to modify VPMem devices/mappings +// for the multi-mapping setup +func newMappedVPMemModifyRequest(ctx context.Context, rType string, deviceNumber uint32, md *mappedDeviceInfo, uvm *UtilityVM) (*hcsschema.ModifySettingRequest, error) { + request := &hcsschema.ModifySettingRequest{ + RequestType: rType, + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, + RequestType: rType, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: md.uvmPath, + MappingInfo: &guestrequest.LCOWMappedLayer{ + DeviceOffsetInBytes: md.mappedRegion.Offset(), + DeviceSizeInBytes: md.sizeInBytes, + }, + }, + }, + } + + pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] + switch rType { + case requesttype.Add: + if pmem == nil { + request.Settings = hcsschema.VirtualPMemDevice{ + ReadOnly: true, + HostPath: md.hostPath, + ImageFormat: "Vhd1", + } + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber) + } else { + request.Settings = hcsschema.VirtualPMemMapping{ + HostPath: md.hostPath, + ImageFormat: "Vhd1", + } + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) + } + case requesttype.Remove: + if pmem == nil { + return nil, errors.Errorf("no device found at location %d", deviceNumber) + } + if len(pmem.mappings) == 1 { + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemControllerResourceFormat, deviceNumber) + } else { + request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) + } + 
default: + return nil, errors.New("unsupported request type") + } + + log.G(ctx).WithFields(logrus.Fields{ + "deviceNumber": deviceNumber, + "hostPath": md.hostPath, + "uvmPath": md.uvmPath, + }).Debugf("new mapped VPMem modify request: %v", request) + return request, nil +} + +// mapVHDLayer adds `device` to mappings +func (pmem *vPMemInfoMulti) mapVHDLayer(ctx context.Context, device *mappedDeviceInfo) (err error) { + if md, ok := pmem.mappings[device.hostPath]; ok { + md.refCount++ + return nil + } + + log.G(ctx).WithFields(logrus.Fields{ + "hostPath": device.hostPath, + "mountPath": device.uvmPath, + "deviceOffset": device.mappedRegion.Offset(), + "deviceSize": device.sizeInBytes, + }).Debug("mapped new device") + + pmem.mappings[device.hostPath] = device + return nil +} + +// unmapVHDLayer removes mapped device with `hostPath` from mappings and releases allocated memory +func (pmem *vPMemInfoMulti) unmapVHDLayer(ctx context.Context, hostPath string) (err error) { + dev, ok := pmem.mappings[hostPath] + if !ok { + return ErrNotAttached + } + + if dev.refCount > 1 { + dev.refCount-- + return nil + } + + if err := pmem.Release(dev.mappedRegion); err != nil { + return err + } + log.G(ctx).WithFields(logrus.Fields{ + "hostPath": dev.hostPath, + }).Debugf("Done releasing resources: %s", dev.hostPath) + delete(pmem.mappings, hostPath) + return nil +} + +// findVPMemMappedDevice finds a VHD device that's been mapped on VPMem surface +func (uvm *UtilityVM) findVPMemMappedDevice(ctx context.Context, hostPath string) (uint32, *mappedDeviceInfo, error) { + for i := uint32(0); i < uvm.vpmemMaxCount; i++ { + vi := uvm.vpmemDevicesMultiMapped[i] + if vi != nil { + if vhd, ok := vi.mappings[hostPath]; ok { + log.G(ctx).WithFields(logrus.Fields{ + "deviceNumber": i, + "hostPath": hostPath, + "uvmPath": vhd.uvmPath, + "refCount": vhd.refCount, + "deviceSize": vhd.sizeInBytes, + "deviceOffset": vhd.mappedRegion.Offset(), + }).Debug("found mapped VHD") + return i, vhd, nil + } + } 
+ } + return 0, nil, ErrNotAttached +} + +// allocateNextVPMemMappedDeviceLocation allocates a memory region with a minimum offset on the VPMem surface, +// where the device with a given `devSize` can be mapped. +func (uvm *UtilityVM) allocateNextVPMemMappedDeviceLocation(ctx context.Context, devSize uint64) (uint32, memory.MappedRegion, error) { + // device size has to be page aligned + devSize = pageAlign(devSize) + + for i := uint32(0); i < uvm.vpmemMaxCount; i++ { + pmem := uvm.vpmemDevicesMultiMapped[i] + if pmem == nil { + pmem = newPackedVPMemDevice() + uvm.vpmemDevicesMultiMapped[i] = pmem + } + + if len(pmem.mappings) >= int(pmem.maxMappedDeviceCount) { + continue + } + + reg, err := pmem.Allocate(devSize) + if err != nil { + continue + } + log.G(ctx).WithFields(logrus.Fields{ + "deviceNumber": i, + "deviceOffset": reg.Offset(), + "deviceSize": devSize, + }).Debug("found offset for mapped VHD on an existing VPMem device") + return i, reg, nil + } + return 0, nil, ErrNoAvailableLocation +} + +// addVPMemMappedDevice adds container layer as a mapped device, first mapped device is added as a regular +// VPMem device, but subsequent additions will call into mapping APIs +// +// Lock MUST be held when calling this function +func (uvm *UtilityVM) addVPMemMappedDevice(ctx context.Context, hostPath string) (_ string, err error) { + if _, dev, err := uvm.findVPMemMappedDevice(ctx, hostPath); err == nil { + dev.refCount++ + return dev.uvmPath, nil + } + + devSize, err := fileSystemSize(hostPath) + if err != nil { + return "", err + } + deviceNumber, memReg, err := uvm.allocateNextVPMemMappedDeviceLocation(ctx, devSize) + if err != nil { + return "", err + } + defer func() { + if err != nil { + pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] + if err := pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debugf("failed to reclaim pmem region: %s", err) + } + } + }() + + uvmPath := fmt.Sprintf(lcowPackedVPMemLayerFmt, deviceNumber, memReg.Offset(), 
devSize) + md := newVPMemMappedDevice(hostPath, uvmPath, devSize, memReg) + modification, err := newMappedVPMemModifyRequest(ctx, requesttype.Add, deviceNumber, md, uvm) + if err := uvm.modify(ctx, modification); err != nil { + return "", errors.Errorf("uvm::addVPMemMappedDevice: failed to modify utility VM configuration: %s", err) + } + defer func() { + if err != nil { + rmRequest, _ := newMappedVPMemModifyRequest(ctx, requesttype.Remove, deviceNumber, md, uvm) + if err := uvm.modify(ctx, rmRequest); err != nil { + log.G(ctx).WithError(err).Debugf("failed to rollback modification") + } + } + }() + + pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] + if err := pmem.mapVHDLayer(ctx, md); err != nil { + return "", errors.Wrapf(err, "failed to update internal state") + } + return uvmPath, nil +} + +// removeVPMemMappedDevice removes a mapped container layer, if the layer is the last to be removed, removes +// VPMem device instead +// +// Lock MUST be held when calling this function +func (uvm *UtilityVM) removeVPMemMappedDevice(ctx context.Context, hostPath string) error { + devNum, md, err := uvm.findVPMemMappedDevice(ctx, hostPath) + if err != nil { + return err + } + if md.refCount > 1 { + md.refCount-- + return nil + } + + modification, err := newMappedVPMemModifyRequest(ctx, requesttype.Remove, devNum, md, uvm) + if err != nil { + return err + } + + if err := uvm.modify(ctx, modification); err != nil { + return errors.Errorf("failed to remove packed VPMem %s from UVM %s: %s", md.hostPath, uvm.id, err) + } + + pmem := uvm.vpmemDevicesMultiMapped[devNum] + if err := pmem.unmapVHDLayer(ctx, hostPath); err != nil { + log.G(ctx).WithError(err).Debugf("failed unmapping VHD layer %s", hostPath) + } + if len(pmem.mappings) == 0 { + uvm.vpmemDevicesMultiMapped[devNum] = nil + } + return nil +} diff --git a/test/vendor/modules.txt b/test/vendor/modules.txt index fe9d9b5708..2cd28b7f23 100644 --- a/test/vendor/modules.txt +++ b/test/vendor/modules.txt @@ -12,6 +12,10 @@ 
github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats github.com/Microsoft/hcsshim/computestorage +github.com/Microsoft/hcsshim/ext4/dmverity +github.com/Microsoft/hcsshim/ext4/internal/compactext4 +github.com/Microsoft/hcsshim/ext4/internal/format +github.com/Microsoft/hcsshim/ext4/tar2ext4 github.com/Microsoft/hcsshim/hcn github.com/Microsoft/hcsshim/internal/clone github.com/Microsoft/hcsshim/internal/cmd @@ -37,6 +41,7 @@ github.com/Microsoft/hcsshim/internal/lcow github.com/Microsoft/hcsshim/internal/log github.com/Microsoft/hcsshim/internal/logfields github.com/Microsoft/hcsshim/internal/longpath +github.com/Microsoft/hcsshim/internal/memory github.com/Microsoft/hcsshim/internal/mergemaps github.com/Microsoft/hcsshim/internal/ncproxyttrpc github.com/Microsoft/hcsshim/internal/oc