From 85779310b1148079712012bc76a241ca68def733 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Sun, 4 Jun 2017 22:21:41 -0700 Subject: [PATCH 1/8] Fixing issue with bit allocation byteoffset calculation The byteoffset calculation was skewed to double include the offset value calculated. The double calculation happens if the starting ordinal is part of the head sequence block. This error in calculation could result in duplicate but getting allocated eventually propogating to ipam and vni id allocations Signed-off-by: Abhinandan Prativadi (cherry picked from commit 6ed2ad9a8821fb562f46c6cd410dca2cc72615a3) --- bitseq/sequence.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bitseq/sequence.go b/bitseq/sequence.go index a67999313c..1960d8ac5a 100644 --- a/bitseq/sequence.go +++ b/bitseq/sequence.go @@ -496,7 +496,10 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { // Derive the this sequence offsets byteOffset := byteStart - inBlockBytePos bitOffset := inBlockBytePos*8 + bitStart - + var firstOffset uint64 + if current == head { + firstOffset = byteOffset + } for current != nil { if current.block != blockMAX { bytePos, bitPos, err := current.getAvailableBit(bitOffset) @@ -504,7 +507,8 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { } // Moving to next block: Reset bit offset. 
bitOffset = 0 - byteOffset += current.count * blockBytes + byteOffset += (current.count * blockBytes) - firstOffset + firstOffset = 0 current = current.next } return invalidPos, invalidPos, ErrNoBitAvailable From ce8f87ee58ecec752682d8a36fdb509178f3e519 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Fri, 9 Jun 2017 14:19:53 -0700 Subject: [PATCH 2/8] Adding a unit test to catch offset scenarios Signed-off-by: Abhinandan Prativadi (cherry picked from commit f766f091c8ad33b04253ebc5758d025a8e4e5b6b) --- bitseq/sequence_test.go | 104 +++++++++++++++++++++++++++------------- 1 file changed, 72 insertions(+), 32 deletions(-) diff --git a/bitseq/sequence_test.go b/bitseq/sequence_test.go index 4ef3bbb6fe..1a31252266 100644 --- a/bitseq/sequence_test.go +++ b/bitseq/sequence_test.go @@ -157,42 +157,45 @@ func TestGetFirstAvailable(t *testing.T) { mask *sequence bytePos uint64 bitPos uint64 + start uint64 }{ - {&sequence{block: 0xffffffff, count: 2048}, invalidPos, invalidPos}, - {&sequence{block: 0x0, count: 8}, 0, 0}, - {&sequence{block: 0x80000000, count: 8}, 0, 1}, - {&sequence{block: 0xC0000000, count: 8}, 0, 2}, - {&sequence{block: 0xE0000000, count: 8}, 0, 3}, - {&sequence{block: 0xF0000000, count: 8}, 0, 4}, - {&sequence{block: 0xF8000000, count: 8}, 0, 5}, - {&sequence{block: 0xFC000000, count: 8}, 0, 6}, - {&sequence{block: 0xFE000000, count: 8}, 0, 7}, - - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0x00000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 0}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0x80000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 1}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xC0000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 2}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xE0000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 3}, - {&sequence{block: 0xffffffff, count: 
1, next: &sequence{block: 0xF0000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 4}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xF8000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 5}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFC000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 6}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFE000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 7}, - - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFF000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 0}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFF800000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 1}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFC00000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 2}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFE00000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 3}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFF00000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 4}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFF80000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 5}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFFC0000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 6}, - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFFE0000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 7}, - - {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xfffffffe, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 7, 7}, - - {&sequence{block: 0xffffffff, count: 2, next: &sequence{block: 0x0, count: 6}}, 8, 0}, + {&sequence{block: 0xffffffff, count: 2048}, invalidPos, invalidPos, 0}, + {&sequence{block: 
0x0, count: 8}, 0, 0, 0}, + {&sequence{block: 0x80000000, count: 8}, 0, 1, 0}, + {&sequence{block: 0xC0000000, count: 8}, 0, 2, 0}, + {&sequence{block: 0xE0000000, count: 8}, 0, 3, 0}, + {&sequence{block: 0xF0000000, count: 8}, 0, 4, 0}, + {&sequence{block: 0xF8000000, count: 8}, 0, 5, 0}, + {&sequence{block: 0xFC000000, count: 8}, 0, 6, 0}, + {&sequence{block: 0xFE000000, count: 8}, 0, 7, 0}, + {&sequence{block: 0xFE000000, count: 8}, 3, 0, 24}, + + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0x00000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 0, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0x80000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 1, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xC0000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 2, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xE0000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 3, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xF0000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 4, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xF8000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 5, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFC000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 6, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFE000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 7, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0x0E000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 4, 0, 16}, + + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFF000000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 0, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFF800000, count: 1, next: 
&sequence{block: 0xffffffff, count: 6}}}, 5, 1, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFC00000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 2, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFE00000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 3, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFF00000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 4, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFF80000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 5, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFFC0000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 6, 0}, + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xFFFE0000, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 5, 7, 0}, + + {&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xfffffffe, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 7, 7, 0}, + + {&sequence{block: 0xffffffff, count: 2, next: &sequence{block: 0x0, count: 6}}, 8, 0, 0}, } for n, i := range input { - bytePos, bitPos, _ := getFirstAvailable(i.mask, 0) + bytePos, bitPos, _ := getFirstAvailable(i.mask, i.start) if bytePos != i.bytePos || bitPos != i.bitPos { t.Fatalf("Error in (%d) getFirstAvailable(). Expected (%d, %d). Got (%d, %d)", n, i.bytePos, i.bitPos, bytePos, bitPos) } @@ -625,6 +628,43 @@ func TestSetUnset(t *testing.T) { } } +func TestOffsetSetUnset(t *testing.T) { + numBits := uint64(32 * blockLen) + var o uint64 + hnd, err := NewHandle("", nil, "", numBits) + if err != nil { + t.Fatal(err) + } + + // set and unset all one by one + for hnd.Unselected() > 0 { + if _, err := hnd.SetAny(); err != nil { + t.Fatal(err) + } + } + + if _, err := hnd.SetAny(); err != ErrNoBitAvailable { + t.Fatal("Expected error. 
Got success") + } + + if _, err := hnd.SetAnyInRange(10, 20); err != ErrNoBitAvailable { + t.Fatal("Expected error. Got success") + } + + if err := hnd.Unset(288); err != nil { + t.Fatal(err) + } + + //At this point sequence is (0xffffffff, 9)->(0x7fffffff, 1)->(0xffffffff, 22)->end + if o, err = hnd.SetAnyInRange(32, 500); err != nil { + t.Fatal(err) + } + + if o != 288 { + t.Fatalf("Expected ordinal not received, Received:%d", o) + } +} + func TestSetInRange(t *testing.T) { numBits := uint64(1024 * blockLen) hnd, err := NewHandle("", nil, "", numBits) From 1672aa69d947ccc98a36e6e653cc1e4b83b042f2 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 13:33:51 -0700 Subject: [PATCH 3/8] Serializing bitseq alloc Previously the bitseq alloc was allocating the first available bit from the begining of the sequence. With this commit the bitseq alloc will proceed from the current allocation. This change will affect the way ipam and vni allocation is done currently. The ip allocation will be done sequentially from the previous allocation as opposed to the first available IP. 
Signed-off-by: Abhinandan Prativadi (cherry picked from commit 5790b5c17ce7a6e2733ba0f4dfccced8144237e3) (cherry picked from commit e673595ac42b4953d8b5eeebe295cf2884db9844) --- bitseq/sequence.go | 31 ++++++++++++++++++++++++++++--- bitseq/store.go | 1 + libnetwork_internal_test.go | 2 +- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/bitseq/sequence.go b/bitseq/sequence.go index 1960d8ac5a..3123734e8c 100644 --- a/bitseq/sequence.go +++ b/bitseq/sequence.go @@ -40,6 +40,7 @@ type Handle struct { id string dbIndex uint64 dbExists bool + curr uint64 store datastore.DataStore sync.Mutex } @@ -192,6 +193,7 @@ func (h *Handle) getCopy() *Handle { dbIndex: h.dbIndex, dbExists: h.dbExists, store: h.store, + curr: h.curr, } } @@ -322,10 +324,10 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 bytePos, bitPos = ordinalToPos(ordinal) } else { if any { - bytePos, bitPos, err = getFirstAvailable(h.head, start) + bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, h.curr, end) ret = posToOrdinal(bytePos, bitPos) - if end < ret { - err = ErrNoBitAvailable + if err == nil { + h.curr = ret + 1 } } else { bytePos, bitPos, err = checkIfAvailable(h.head, ordinal) @@ -514,6 +516,29 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { return invalidPos, invalidPos, ErrNoBitAvailable } +//getAvailableFromCurrent will look for available ordinal from the current ordinal. +// If none found then it will loop back to the start to check of the available bit. 
+//This can be further optimized to check from start till curr in case of a rollover +func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) { + var bytePos, bitPos uint64 + if curr != 0 && curr > start { + bytePos, bitPos, _ = getFirstAvailable(head, curr) + ret := posToOrdinal(bytePos, bitPos) + if end < ret { + goto begin + } + return bytePos, bitPos, nil + } + +begin: + bytePos, bitPos, _ = getFirstAvailable(head, start) + ret := posToOrdinal(bytePos, bitPos) + if end < ret { + return invalidPos, invalidPos, ErrNoBitAvailable + } + return bytePos, bitPos, nil +} + // checkIfAvailable checks if the bit correspondent to the specified ordinal is unset // If the ordinal is beyond the sequence limits, a negative response is returned func checkIfAvailable(head *sequence, ordinal uint64) (uint64, uint64, error) { diff --git a/bitseq/store.go b/bitseq/store.go index 5448927eb1..cdb7f04264 100644 --- a/bitseq/store.go +++ b/bitseq/store.go @@ -87,6 +87,7 @@ func (h *Handle) CopyTo(o datastore.KVObject) error { dstH.dbIndex = h.dbIndex dstH.dbExists = h.dbExists dstH.store = h.store + dstH.curr = h.curr dstH.Unlock() return nil diff --git a/libnetwork_internal_test.go b/libnetwork_internal_test.go index c5b8ae2ae7..a5782711b8 100644 --- a/libnetwork_internal_test.go +++ b/libnetwork_internal_test.go @@ -608,7 +608,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { } defer ep.Delete(false) - expectedIP, _ := types.ParseCIDR("10.34.0.1/16") + expectedIP, _ := types.ParseCIDR("10.34.0.2/16") if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { t.Fatalf("Ipam release must have failed, endpoint has unexpected address: %v", ep.Info().Iface().Address()) } From ab395157ddaa77243416edb0cd8bb72c1c718881 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 17:41:21 -0700 Subject: [PATCH 4/8] Minor unit test change Since bit allocation is no longer first available from the start some verfications are 
removed/modified to the change allocation model Signed-off-by: Abhinandan Prativadi (cherry picked from commit 0184de8fc5074f94a74a0025ff60dab69cc442a9) (cherry picked from commit e68e37f8d64f9ba097cacef86e2d3f65c1a691fb) --- bitseq/sequence_test.go | 206 +++++++++++++++++++++++++++++++++++++--- ipam/allocator_test.go | 134 ++++++++++++++++++++++++++ 2 files changed, 328 insertions(+), 12 deletions(-) diff --git a/bitseq/sequence_test.go b/bitseq/sequence_test.go index 1a31252266..2373755e67 100644 --- a/bitseq/sequence_test.go +++ b/bitseq/sequence_test.go @@ -562,7 +562,7 @@ func TestSet(t *testing.T) { t.Fatalf("Expected failure, but succeeded") } - os, err := hnd.SetAny() + os, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure: %v", err) } @@ -606,12 +606,12 @@ func TestSetUnset(t *testing.T) { // set and unset all one by one for hnd.Unselected() > 0 { - if _, err := hnd.SetAny(); err != nil { + if _, err := hnd.SetAny(false); err != nil { t.Fatal(err) } } - if _, err := hnd.SetAny(); err != ErrNoBitAvailable { - t.Fatalf("Expected error. Got success") + if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { + t.Fatal("Expected error. Got success") } if _, err := hnd.SetAnyInRange(10, 20); err != ErrNoBitAvailable { t.Fatalf("Expected error. Got success") @@ -638,16 +638,16 @@ func TestOffsetSetUnset(t *testing.T) { // set and unset all one by one for hnd.Unselected() > 0 { - if _, err := hnd.SetAny(); err != nil { + if _, err := hnd.SetAny(false); err != nil { t.Fatal(err) } } - if _, err := hnd.SetAny(); err != ErrNoBitAvailable { + if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { t.Fatal("Expected error. Got success") } - if _, err := hnd.SetAnyInRange(10, 20); err != ErrNoBitAvailable { + if _, err := hnd.SetAnyInRange(10, 20, false); err != ErrNoBitAvailable { t.Fatal("Expected error. 
Got success") } @@ -656,7 +656,7 @@ func TestOffsetSetUnset(t *testing.T) { } //At this point sequence is (0xffffffff, 9)->(0x7fffffff, 1)->(0xffffffff, 22)->end - if o, err = hnd.SetAnyInRange(32, 500); err != nil { + if o, err = hnd.SetAnyInRange(32, 500, false); err != nil { t.Fatal(err) } @@ -785,6 +785,126 @@ func TestSetInRange(t *testing.T) { } } +func TestSetInRangeSerial(t *testing.T) { + numBits := uint64(1024 * blockLen) + hnd, err := NewHandle("", nil, "", numBits) + if err != nil { + t.Fatal(err) + } + hnd.head = getTestSequence() + + firstAv := uint64(100*blockLen + blockLen - 1) + + if o, err := hnd.SetAnyInRange(4, 3); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if o, err := hnd.SetAnyInRange(0, numBits); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen)) + if err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + if o != firstAv { + t.Fatalf("Unexpected ordinal: %d", o) + } + + if o, err := hnd.SetAnyInRange(0, uint64(blockLen)); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if o, err := hnd.SetAnyInRange(0, firstAv-1); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen)); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + if err != nil { + t.Fatal(err) + } + if o != 161*uint64(blockLen)+30 { + t.Fatalf("Unexpected ordinal: %d", o) + } + + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + if err != nil { + t.Fatal(err) + } + if o != 161*uint64(blockLen)+31 { + t.Fatalf("Unexpected ordinal: %d", o) + } + + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + if err == nil { + t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) + } + + if _, err := hnd.SetAnyInRange(0, numBits-1); err != nil { + t.Fatalf("Unexpected failure: %v", err) + } + + // set one bit using the set range with 1 bit size range + if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1)); err != nil { + t.Fatal(err) + } + + // create a non multiple of 32 mask + hnd, err = NewHandle("", nil, "", 30) + if err != nil { + t.Fatal(err) + } + + // set all bit in the first range + for hnd.Unselected() > 22 { + if o, err := hnd.SetAnyInRange(0, 7); err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + } + // try one more set, which should fail + o, err = hnd.SetAnyInRange(0, 7) + if err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + if err != ErrNoBitAvailable { + t.Fatalf("Unexpected error: %v", err) + } + + // set all bit in a second range + for hnd.Unselected() > 14 { + if o, err := hnd.SetAnyInRange(8, 15); err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + } + + // try one more set, which should fail + o, err = hnd.SetAnyInRange(0, 15) + if err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + if err != ErrNoBitAvailable { + t.Fatalf("Unexpected error: %v", err) + } + + // set all bit in a range which includes the last bit + for hnd.Unselected() > 12 { + if o, err := hnd.SetAnyInRange(28, 29); err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + } + o, err = hnd.SetAnyInRange(28, 29) + if err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + if err != ErrNoBitAvailable { + t.Fatalf("Unexpected error: %v", err) + } +} + // This one tests an allocation pattern which unveiled an issue in pushReservation // Specifically a failure in detecting when we are in the (B) case (the bit to set // belongs to the last block of the current sequence). 
Because of a bug, code @@ -861,7 +981,7 @@ func TestMethods(t *testing.T) { } for i := 0; i < 192; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatal(err) } @@ -941,7 +1061,7 @@ func TestAllocateRandomDeallocate(t *testing.T) { // Allocate first half of the bits for i := 0; i < numBits/2; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) } @@ -971,7 +1091,7 @@ func TestAllocateRandomDeallocate(t *testing.T) { // Request a quarter of bits for i := 0; i < numBits/4; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) } @@ -989,6 +1109,68 @@ func TestAllocateRandomDeallocate(t *testing.T) { } } +func TestAllocateRandomDeallocateSerialize(t *testing.T) { + ds, err := randomLocalStore() + if err != nil { + t.Fatal(err) + } + + numBlocks := uint32(8) + numBits := int(numBlocks * blockLen) + hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits)) + if err != nil { + t.Fatal(err) + } + + expected := &sequence{block: 0xffffffff, count: uint64(numBlocks / 2), next: &sequence{block: 0x0, count: uint64(numBlocks / 2)}} + + // Allocate first half of the bits + for i := 0; i < numBits/2; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) + } + } + if hnd.Unselected() != uint64(numBits/2) { + t.Fatalf("Expected full sequence. Instead found %d free bits. %s", hnd.unselected, hnd) + } + if !hnd.head.equal(expected) { + t.Fatalf("Unexpected sequence. 
Got:\n%s", hnd) + } + + seed := time.Now().Unix() + rand.Seed(seed) + + // Deallocate half of the allocated bits following a random pattern + pattern := rand.Perm(numBits / 2) + for i := 0; i < numBits/4; i++ { + bit := pattern[i] + err := hnd.Unset(uint64(bit)) + if err != nil { + t.Fatalf("Unexpected failure on deallocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd) + } + } + if hnd.Unselected() != uint64(3*numBits/4) { + t.Fatalf("Expected full sequence. Instead found %d free bits.\nSeed: %d.\n%s", hnd.unselected, seed, hnd) + } + + // Request a quarter of bits + for i := 0; i < numBits/4; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) + } + } + if hnd.Unselected() != uint64(numBits/2) { + t.Fatalf("Expected half sequence. Instead found %d free bits.\nSeed: %d\n%s", hnd.unselected, seed, hnd) + } + + err = hnd.Destroy() + if err != nil { + t.Fatal(err) + } +} + func TestRetrieveFromStore(t *testing.T) { ds, err := randomLocalStore() if err != nil { @@ -1003,7 +1185,7 @@ func TestRetrieveFromStore(t *testing.T) { // Allocate first half of the bits for i := 0; i < numBits/2; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) } diff --git a/ipam/allocator_test.go b/ipam/allocator_test.go index c7f84a6122..0b3f7049a0 100644 --- a/ipam/allocator_test.go +++ b/ipam/allocator_test.go @@ -645,6 +645,7 @@ func TestRequestReleaseAddressFromSubPool(t *testing.T) { unoExp, _ := types.ParseCIDR("10.2.2.0/16") dueExp, _ := types.ParseCIDR("10.2.2.2/16") treExp, _ := types.ParseCIDR("10.2.2.1/16") + if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil { t.Fatal(err) } @@ -695,6 +696,139 @@ func TestRequestReleaseAddressFromSubPool(t *testing.T) { } } +func TestSerializeRequestReleaseAddressFromSubPool(t *testing.T) { + opts := 
map[string]string{ + ipamapi.AllocSerialPrefix: "true"} + a, err := getAllocator() + if err != nil { + t.Fatal(err) + } + a.addrSpaces["rosso"] = &addrSpace{ + id: dsConfigKey + "/" + "rosso", + ds: a.addrSpaces[localAddressSpace].ds, + alloc: a.addrSpaces[localAddressSpace].alloc, + scope: a.addrSpaces[localAddressSpace].scope, + subnets: map[SubnetKey]*PoolData{}, + } + + poolID, _, _, err := a.RequestPool("rosso", "172.28.0.0/16", "172.28.30.0/24", nil, false) + if err != nil { + t.Fatal(err) + } + + var ip *net.IPNet + expected := &net.IPNet{IP: net.IP{172, 28, 30, 255}, Mask: net.IPMask{255, 255, 0, 0}} + for err == nil { + var c *net.IPNet + if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil { + ip = c + } + } + if err != ipamapi.ErrNoAvailableIPs { + t.Fatal(err) + } + if !types.CompareIPNet(expected, ip) { + t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip) + } + rp := &net.IPNet{IP: net.IP{172, 28, 30, 97}, Mask: net.IPMask{255, 255, 0, 0}} + if err = a.ReleaseAddress(poolID, rp.IP); err != nil { + t.Fatal(err) + } + if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(rp, ip) { + t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip) + } + + _, _, _, err = a.RequestPool("rosso", "10.0.0.0/8", "10.0.0.0/16", nil, false) + if err != nil { + t.Fatal(err) + } + poolID, _, _, err = a.RequestPool("rosso", "10.0.0.0/16", "10.0.0.0/24", nil, false) + if err != nil { + t.Fatal(err) + } + expected = &net.IPNet{IP: net.IP{10, 0, 0, 255}, Mask: net.IPMask{255, 255, 0, 0}} + for err == nil { + var c *net.IPNet + if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil { + ip = c + } + } + if err != ipamapi.ErrNoAvailableIPs { + t.Fatal(err) + } + if !types.CompareIPNet(expected, ip) { + t.Fatalf("Unexpected last IP from subpool. Expected: %s. 
Got: %v.", expected, ip) + } + rp = &net.IPNet{IP: net.IP{10, 0, 0, 79}, Mask: net.IPMask{255, 255, 0, 0}} + if err = a.ReleaseAddress(poolID, rp.IP); err != nil { + t.Fatal(err) + } + if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(rp, ip) { + t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip) + } + + // Request any addresses from subpool after explicit address request + unoExp, _ := types.ParseCIDR("10.2.2.0/16") + dueExp, _ := types.ParseCIDR("10.2.2.2/16") + treExp, _ := types.ParseCIDR("10.2.2.1/16") + quaExp, _ := types.ParseCIDR("10.2.2.3/16") + fivExp, _ := types.ParseCIDR("10.2.2.4/16") + if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil { + t.Fatal(err) + } + tre, _, err := a.RequestAddress(poolID, treExp.IP, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(tre, treExp) { + t.Fatalf("Unexpected address: %v", tre) + } + + uno, _, err := a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(uno, unoExp) { + t.Fatalf("Unexpected address: %v", uno) + } + + due, _, err := a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(due, dueExp) { + t.Fatalf("Unexpected address: %v", due) + } + + if err = a.ReleaseAddress(poolID, uno.IP); err != nil { + t.Fatal(err) + } + uno, _, err = a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(uno, quaExp) { + t.Fatalf("Unexpected address: %v", uno) + } + + if err = a.ReleaseAddress(poolID, tre.IP); err != nil { + t.Fatal(err) + } + tre, _, err = a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(tre, fivExp) { + t.Fatalf("Unexpected address: %v", tre) + } +} + func TestGetAddress(t *testing.T) { input := []string{ /*"10.0.0.0/8", "10.0.0.0/9", "10.0.0.0/10",*/ "10.0.0.0/11", "10.0.0.0/12", 
"10.0.0.0/13", "10.0.0.0/14", From 2356397ae4aea524e815ffc8b19bb21159f9c932 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 19:41:21 -0700 Subject: [PATCH 5/8] Adding a unit case to verify rollover Signed-off-by: Abhinandan Prativadi (cherry picked from commit a2bcac00122a43d771af77ff060794dd02bb4a00) (cherry picked from commit 0031dc3f870bf472abaa5f56f40a146556bd6b0d) --- bitseq/sequence.go | 24 +- bitseq/sequence_test.go | 240 +++++++----------- drivers/overlay/ov_network.go | 2 +- drivers/overlay/ovmanager/ovmanager.go | 2 +- drivers/solaris/overlay/ov_network.go | 2 +- .../solaris/overlay/ovmanager/ovmanager.go | 2 +- idm/idm.go | 8 +- idm/idm_test.go | 94 +++++-- ipam/allocator.go | 16 +- ipam/allocator_test.go | 2 +- ipamapi/labels.go | 10 + libnetwork_internal_test.go | 2 +- 12 files changed, 220 insertions(+), 184 deletions(-) create mode 100644 ipamapi/labels.go diff --git a/bitseq/sequence.go b/bitseq/sequence.go index 3123734e8c..6864c8fa8b 100644 --- a/bitseq/sequence.go +++ b/bitseq/sequence.go @@ -198,22 +198,22 @@ func (h *Handle) getCopy() *Handle { } // SetAnyInRange atomically sets the first unset bit in the specified range in the sequence and returns the corresponding ordinal -func (h *Handle) SetAnyInRange(start, end uint64) (uint64, error) { +func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) { if end < start || end >= h.bits { return invalidPos, fmt.Errorf("invalid bit range [%d, %d]", start, end) } if h.Unselected() == 0 { return invalidPos, ErrNoBitAvailable } - return h.set(0, start, end, true, false) + return h.set(0, start, end, true, false, serial) } // SetAny atomically sets the first unset bit in the sequence and returns the corresponding ordinal -func (h *Handle) SetAny() (uint64, error) { +func (h *Handle) SetAny(serial bool) (uint64, error) { if h.Unselected() == 0 { return invalidPos, ErrNoBitAvailable } - return h.set(0, 0, h.bits-1, true, false) + return h.set(0, 0, h.bits-1, 
true, false, serial) } // Set atomically sets the corresponding bit in the sequence @@ -221,7 +221,7 @@ func (h *Handle) Set(ordinal uint64) error { if err := h.validateOrdinal(ordinal); err != nil { return err } - _, err := h.set(ordinal, 0, 0, false, false) + _, err := h.set(ordinal, 0, 0, false, false, false) return err } @@ -230,7 +230,7 @@ func (h *Handle) Unset(ordinal uint64) error { if err := h.validateOrdinal(ordinal); err != nil { return err } - _, err := h.set(ordinal, 0, 0, false, true) + _, err := h.set(ordinal, 0, 0, false, true, false) return err } @@ -299,7 +299,7 @@ func (h *Handle) CheckConsistency() error { } // set/reset the bit -func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64, error) { +func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) { var ( bitPos uint64 bytePos uint64 @@ -309,6 +309,7 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 for { var store datastore.DataStore + curr := uint64(0) h.Lock() store = h.store h.Unlock() @@ -319,12 +320,15 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 } h.Lock() + if serial { + curr = h.curr + } // Get position if available if release { bytePos, bitPos = ordinalToPos(ordinal) } else { if any { - bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, h.curr, end) + bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end) ret = posToOrdinal(bytePos, bitPos) if err == nil { h.curr = ret + 1 @@ -516,9 +520,9 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { return invalidPos, invalidPos, ErrNoBitAvailable } -//getAvailableFromCurrent will look for available ordinal from the current ordinal. +// getAvailableFromCurrent will look for available ordinal from the current ordinal. // If none found then it will loop back to the start to check of the available bit. 
-//This can be further optimized to check from start till curr in case of a rollover +// This can be further optimized to check from start till curr in case of a rollover func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) { var bytePos, bitPos uint64 if curr != 0 && curr > start { diff --git a/bitseq/sequence_test.go b/bitseq/sequence_test.go index 2373755e67..0007b6d9d5 100644 --- a/bitseq/sequence_test.go +++ b/bitseq/sequence_test.go @@ -613,8 +613,8 @@ func TestSetUnset(t *testing.T) { if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { t.Fatal("Expected error. Got success") } - if _, err := hnd.SetAnyInRange(10, 20); err != ErrNoBitAvailable { - t.Fatalf("Expected error. Got success") + if _, err := hnd.SetAnyInRange(10, 20, false); err != ErrNoBitAvailable { + t.Fatal("Expected error. Got success") } if err := hnd.Set(50); err != ErrBitAllocated { t.Fatalf("Expected error. Got %v: %s", err, hnd) @@ -675,15 +675,15 @@ func TestSetInRange(t *testing.T) { firstAv := uint64(100*blockLen + blockLen - 1) - if o, err := hnd.SetAnyInRange(4, 3); err == nil { + if o, err := hnd.SetAnyInRange(4, 3, false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - if o, err := hnd.SetAnyInRange(0, numBits); err == nil { + if o, err := hnd.SetAnyInRange(0, numBits, false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen)) + o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen), false) if err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } @@ -691,19 +691,19 @@ func TestSetInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - if o, err := hnd.SetAnyInRange(0, uint64(blockLen)); err == nil { + if o, err := hnd.SetAnyInRange(0, uint64(blockLen), false); err == nil { t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) } - if o, err := hnd.SetAnyInRange(0, firstAv-1); err == nil { + if o, err := hnd.SetAnyInRange(0, firstAv-1, false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen)); err == nil { + if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen), false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen), false) if err != nil { t.Fatal(err) } @@ -711,7 +711,7 @@ func TestSetInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen), false) if err != nil { t.Fatal(err) } @@ -719,17 +719,17 @@ func TestSetInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen), false) if err == nil { t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) } - if _, err := hnd.SetAnyInRange(0, numBits-1); err != nil { + if _, err := hnd.SetAnyInRange(0, numBits-1, false); err != nil { t.Fatalf("Unexpected failure: %v", err) } // set one bit using the set range with 1 bit size range - if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1)); err != nil { + if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1), false); err != nil { t.Fatal(err) } @@ -741,12 +741,12 @@ func TestSetInRange(t *testing.T) { // set all bit in the first range for hnd.Unselected() > 22 { - if o, err := hnd.SetAnyInRange(0, 7); err != nil { + if o, err := hnd.SetAnyInRange(0, 7, false); err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } } // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 7) + o, err = hnd.SetAnyInRange(0, 7, false) if err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } @@ -756,13 +756,13 @@ func TestSetInRange(t *testing.T) { // set all bit in a second range for hnd.Unselected() > 14 { - if o, err := hnd.SetAnyInRange(8, 15); err != nil { + if o, err := hnd.SetAnyInRange(8, 15, false); err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } } // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 15) + o, err = hnd.SetAnyInRange(0, 15, false) if err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } @@ -772,131 +772,11 @@ func TestSetInRange(t *testing.T) { // set all bit in a range which includes the last bit for hnd.Unselected() > 12 { - if o, err := hnd.SetAnyInRange(28, 29); err != nil { + if o, err := hnd.SetAnyInRange(28, 29, false); err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } } - o, err = hnd.SetAnyInRange(28, 29) - if err == nil { - t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) - } - if err != ErrNoBitAvailable { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestSetInRangeSerial(t *testing.T) { - numBits := uint64(1024 * blockLen) - hnd, err := NewHandle("", nil, "", numBits) - if err != nil { - t.Fatal(err) - } - hnd.head = getTestSequence() - - firstAv := uint64(100*blockLen + blockLen - 1) - - if o, err := hnd.SetAnyInRange(4, 3); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if o, err := hnd.SetAnyInRange(0, numBits); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen)) - if err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - if o != firstAv { - t.Fatalf("Unexpected ordinal: %d", o) - } - - if o, err := hnd.SetAnyInRange(0, uint64(blockLen)); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if o, err := hnd.SetAnyInRange(0, firstAv-1); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen)); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) - if err != nil { - t.Fatal(err) - } - if o != 161*uint64(blockLen)+30 { - t.Fatalf("Unexpected ordinal: %d", o) - } - - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) - if err != nil { - t.Fatal(err) - } - if o != 161*uint64(blockLen)+31 { - t.Fatalf("Unexpected ordinal: %d", o) - } - - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) - if err == nil { - t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) - } - - if _, err := hnd.SetAnyInRange(0, numBits-1); err != nil { - t.Fatalf("Unexpected failure: %v", err) - } - - // set one bit using the set range with 1 bit size range - if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1)); err != nil { - t.Fatal(err) - } - - // create a non multiple of 32 mask - hnd, err = NewHandle("", nil, "", 30) - if err != nil { - t.Fatal(err) - } - - // set all bit in the first range - for hnd.Unselected() > 22 { - if o, err := hnd.SetAnyInRange(0, 7); err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - } - // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 7) - if err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - if err != ErrNoBitAvailable { - t.Fatalf("Unexpected error: %v", err) - } - - // set all bit in a second range - for hnd.Unselected() > 14 { - if o, err := hnd.SetAnyInRange(8, 15); err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - } - - // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 15) - if err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - if err != ErrNoBitAvailable { - t.Fatalf("Unexpected error: %v", err) - } - - // set all bit in a range which includes the last bit - for hnd.Unselected() > 12 { - if o, err := hnd.SetAnyInRange(28, 29); err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - } - o, err = hnd.SetAnyInRange(28, 29) + o, err = hnd.SetAnyInRange(28, 29, false) if err == nil { t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) } @@ -926,7 +806,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatal(err) } - o, err := hnd.SetAnyInRange(128, 255) + o, err := hnd.SetAnyInRange(128, 255, false) if err != nil { t.Fatal(err) } @@ -934,7 +814,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(128, 255) + o, err = hnd.SetAnyInRange(128, 255, false) if err != nil { t.Fatal(err) } @@ -943,7 +823,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(246, 255) + o, err = hnd.SetAnyInRange(246, 255, false) if err != nil { t.Fatal(err) } @@ -951,7 +831,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(246, 255) + o, err = hnd.SetAnyInRange(246, 255, false) if err != nil { t.Fatal(err) } @@ -1131,6 +1011,7 @@ func TestAllocateRandomDeallocateSerialize(t *testing.T) { t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) } } + if hnd.Unselected() != uint64(numBits/2) { t.Fatalf("Expected full sequence. Instead found %d free bits. %s", hnd.unselected, hnd) } @@ -1356,3 +1237,76 @@ func TestIsCorrupted(t *testing.T) { } } } + +func TestSetRollover(t *testing.T) { + ds, err := randomLocalStore() + if err != nil { + t.Fatal(err) + } + + numBlocks := uint32(8) + numBits := int(numBlocks * blockLen) + hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits)) + if err != nil { + t.Fatal(err) + } + + // Allocate first half of the bits + for i := 0; i < numBits/2; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) + } + } + + if hnd.Unselected() != uint64(numBits/2) { + t.Fatalf("Expected full sequence. Instead found %d free bits. 
 %s", hnd.unselected, hnd) + } + + seed := time.Now().Unix() + rand.Seed(seed) + + // Deallocate half of the allocated bits following a random pattern + pattern := rand.Perm(numBits / 2) + for i := 0; i < numBits/4; i++ { + bit := pattern[i] + err := hnd.Unset(uint64(bit)) + if err != nil { + t.Fatalf("Unexpected failure on deallocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd) + } + } + if hnd.Unselected() != uint64(3*numBits/4) { + t.Fatalf("Expected full sequence. Instead found %d free bits.\nSeed: %d.\n%s", hnd.unselected, seed, hnd) + } + + //request to allocate for remaining half of the bits + for i := 0; i < numBits/2; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) + } + } + + //At this point all the bits must be allocated except the randomly unallocated bits + //which were unallocated in the first half of the bit sequence + if hnd.Unselected() != uint64(numBits/4) { + t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), numBits/4) + } + + for i := 0; i < numBits/4; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) + } + } + //Now requesting to allocate the unallocated random bits (quarter of the number of bits) should + //leave no more bits that can be allocated. 
+ if hnd.Unselected() != 0 { + t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), numBits/4) + } + + err = hnd.Destroy() + if err != nil { + t.Fatal(err) + } +} diff --git a/drivers/overlay/ov_network.go b/drivers/overlay/ov_network.go index 9190e555fb..860802e7ed 100644 --- a/drivers/overlay/ov_network.go +++ b/drivers/overlay/ov_network.go @@ -1063,7 +1063,7 @@ func (n *network) obtainVxlanID(s *subnet) error { } if s.vni == 0 { - vxlanID, err := n.driver.vxlanIdm.GetID() + vxlanID, err := n.driver.vxlanIdm.GetID(true) if err != nil { return fmt.Errorf("failed to allocate vxlan id: %v", err) } diff --git a/drivers/overlay/ovmanager/ovmanager.go b/drivers/overlay/ovmanager/ovmanager.go index 2c4c771e58..7e64f45da5 100644 --- a/drivers/overlay/ovmanager/ovmanager.go +++ b/drivers/overlay/ovmanager/ovmanager.go @@ -164,7 +164,7 @@ func (n *network) obtainVxlanID(s *subnet) error { n.Unlock() if vni == 0 { - vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd) + vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd, true) if err != nil { return err } diff --git a/drivers/solaris/overlay/ov_network.go b/drivers/solaris/overlay/ov_network.go index e9b27ba5bd..16fdd2df51 100644 --- a/drivers/solaris/overlay/ov_network.go +++ b/drivers/solaris/overlay/ov_network.go @@ -723,7 +723,7 @@ func (n *network) obtainVxlanID(s *subnet) error { } if s.vni == 0 { - vxlanID, err := n.driver.vxlanIdm.GetID() + vxlanID, err := n.driver.vxlanIdm.GetID(true) if err != nil { return fmt.Errorf("failed to allocate vxlan id: %v", err) } diff --git a/drivers/solaris/overlay/ovmanager/ovmanager.go b/drivers/solaris/overlay/ovmanager/ovmanager.go index a756990acb..512cb11d93 100644 --- a/drivers/solaris/overlay/ovmanager/ovmanager.go +++ b/drivers/solaris/overlay/ovmanager/ovmanager.go @@ -164,7 +164,7 @@ func (n *network) obtainVxlanID(s *subnet) error { n.Unlock() if vni == 0 { - vni, err = n.driver.vxlanIdm.GetID() + vni, err = 
n.driver.vxlanIdm.GetID(true) if err != nil { return err } diff --git a/idm/idm.go b/idm/idm.go index 20be113c83..83098e4f7e 100644 --- a/idm/idm.go +++ b/idm/idm.go @@ -33,11 +33,11 @@ func New(ds datastore.DataStore, id string, start, end uint64) (*Idm, error) { } // GetID returns the first available id in the set -func (i *Idm) GetID() (uint64, error) { +func (i *Idm) GetID(serial bool) (uint64, error) { if i.handle == nil { return 0, fmt.Errorf("ID set is not initialized") } - ordinal, err := i.handle.SetAny() + ordinal, err := i.handle.SetAny(serial) return i.start + ordinal, err } @@ -55,7 +55,7 @@ func (i *Idm) GetSpecificID(id uint64) error { } // GetIDInRange returns the first available id in the set within a [start,end] range -func (i *Idm) GetIDInRange(start, end uint64) (uint64, error) { +func (i *Idm) GetIDInRange(start, end uint64, serial bool) (uint64, error) { if i.handle == nil { return 0, fmt.Errorf("ID set is not initialized") } @@ -64,7 +64,7 @@ func (i *Idm) GetIDInRange(start, end uint64) (uint64, error) { return 0, fmt.Errorf("Requested range does not belong to the set") } - ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start) + ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start, serial) return i.start + ordinal, err } diff --git a/idm/idm_test.go b/idm/idm_test.go index 2100943a55..a207274be2 100644 --- a/idm/idm_test.go +++ b/idm/idm_test.go @@ -46,7 +46,7 @@ func TestAllocate(t *testing.T) { t.Fatalf("Expected failure but succeeded") } - o, err := i.GetID() + o, err := i.GetID(false) if err != nil { t.Fatal(err) } @@ -59,7 +59,7 @@ func TestAllocate(t *testing.T) { t.Fatal(err) } - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -67,7 +67,7 @@ func TestAllocate(t *testing.T) { t.Fatalf("Unexpected id returned: %d", o) } - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -75,14 +75,14 @@ func TestAllocate(t *testing.T) { t.Fatalf("Unexpected id 
returned: %d", o) } - o, err = i.GetID() + o, err = i.GetID(false) if err == nil { t.Fatalf("Expected failure but succeeded: %d", o) } i.Release(50) - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -100,8 +100,8 @@ func TestAllocate(t *testing.T) { func TestUninitialized(t *testing.T) { i := &Idm{} - if _, err := i.GetID(); err == nil { - t.Fatalf("Expected failure but succeeded") + if _, err := i.GetID(false); err == nil { + t.Fatal("Expected failure but succeeded") } if err := i.GetSpecificID(44); err == nil { @@ -115,7 +115,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatal(err) } - o, err := i.GetIDInRange(6, 6) + o, err := i.GetIDInRange(6, 6, false) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Expected failure but succeeded") } - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -137,7 +137,7 @@ func TestAllocateInRange(t *testing.T) { i.Release(6) - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -146,7 +146,7 @@ func TestAllocateInRange(t *testing.T) { } for n := 7; n <= 10; n++ { - o, err := i.GetIDInRange(7, 10) + o, err := i.GetIDInRange(7, 10, false) if err != nil { t.Fatal(err) } @@ -165,7 +165,7 @@ func TestAllocateInRange(t *testing.T) { i.Release(10) - o, err = i.GetIDInRange(5, 10) + o, err = i.GetIDInRange(5, 10, false) if err != nil { t.Fatal(err) } @@ -175,7 +175,7 @@ func TestAllocateInRange(t *testing.T) { i.Release(5) - o, err = i.GetIDInRange(5, 10) + o, err = i.GetIDInRange(5, 10, false) if err != nil { t.Fatal(err) } @@ -188,7 +188,7 @@ func TestAllocateInRange(t *testing.T) { } for n := 5; n <= 10; n++ { - o, err := i.GetIDInRange(5, 10) + o, err := i.GetIDInRange(5, 10, false) if err != nil { t.Fatal(err) } @@ -210,7 +210,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatal(err) } - o, err = i.GetIDInRange(4096, ul) + o, err = i.GetIDInRange(4096, ul, false) if err != nil { t.Fatal(err) 
} @@ -218,7 +218,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Unexpected id returned. Expected: 4096. Got: %d", o) } - o, err = i.GetIDInRange(4096, ul) + o, err = i.GetIDInRange(4096, ul, false) if err != nil { t.Fatal(err) } @@ -226,7 +226,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Unexpected id returned. Expected: 4097. Got: %d", o) } - o, err = i.GetIDInRange(4096, ul) + o, err = i.GetIDInRange(4096, ul, false) if err != nil { t.Fatal(err) } @@ -234,3 +234,63 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Unexpected id returned. Expected: 4098. Got: %d", o) } } + +func TestAllocateSerial(t *testing.T) { + i, err := New(nil, "myids", 50, 55) + if err != nil { + t.Fatal(err) + } + + if err = i.GetSpecificID(49); err == nil { + t.Fatal("Expected failure but succeeded") + } + + if err = i.GetSpecificID(56); err == nil { + t.Fatal("Expected failure but succeeded") + } + + o, err := i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 50 { + t.Fatalf("Unexpected first id returned: %d", o) + } + + err = i.GetSpecificID(50) + if err == nil { + t.Fatal(err) + } + + o, err = i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 51 { + t.Fatalf("Unexpected id returned: %d", o) + } + + o, err = i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 52 { + t.Fatalf("Unexpected id returned: %d", o) + } + + i.Release(50) + + o, err = i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 53 { + t.Fatal("Unexpected id returned") + } + + i.Release(52) + err = i.GetSpecificID(52) + if err != nil { + t.Fatal(err) + } +} diff --git a/ipam/allocator.go b/ipam/allocator.go index 4243d57a74..6a5e8c2f4f 100644 --- a/ipam/allocator.go +++ b/ipam/allocator.go @@ -457,7 +457,15 @@ func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s return nil, nil, types.InternalErrorf("could not find bitmask in datastore for %s on address %v request from pool %s: %v", k.String(), prefAddress, poolID, err) } - ip, err 
:= a.getAddress(p.Pool, bm, prefAddress, p.Range) + // In order to request for a serial ip address allocation, callers can pass in the option to request + // IP allocation serially or first available IP in the subnet + var serial bool + if opts != nil { + if val, ok := opts[ipamapi.AllocSerialPrefix]; ok { + serial = (val == "true") + } + } + ip, err := a.getAddress(p.Pool, bm, prefAddress, p.Range, serial) if err != nil { return nil, nil, err } @@ -522,7 +530,7 @@ func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error { return bm.Unset(ipToUint64(h)) } -func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange) (net.IP, error) { +func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange, serial bool) (net.IP, error) { var ( ordinal uint64 err error @@ -535,7 +543,7 @@ func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddres return nil, ipamapi.ErrNoAvailableIPs } if ipr == nil && prefAddress == nil { - ordinal, err = bitmask.SetAny() + ordinal, err = bitmask.SetAny(serial) } else if prefAddress != nil { hostPart, e := types.GetHostPartIP(prefAddress, base.Mask) if e != nil { @@ -544,7 +552,7 @@ func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddres ordinal = ipToUint64(types.GetMinimalIP(hostPart)) err = bitmask.Set(ordinal) } else { - ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End) + ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End, serial) } switch err { diff --git a/ipam/allocator_test.go b/ipam/allocator_test.go index 0b3f7049a0..be314ca66a 100644 --- a/ipam/allocator_test.go +++ b/ipam/allocator_test.go @@ -1034,7 +1034,7 @@ func assertGetAddress(t *testing.T, subnet string) { start := time.Now() run := 0 for err != ipamapi.ErrNoAvailableIPs { - _, err = a.getAddress(sub, bm, nil, nil) + _, err = a.getAddress(sub, bm, nil, nil, false) run++ } if printTime { diff --git 
a/ipamapi/labels.go b/ipamapi/labels.go new file mode 100644 index 0000000000..e5c7d1cc7e --- /dev/null +++ b/ipamapi/labels.go @@ -0,0 +1,10 @@ +package ipamapi + +const ( + // Prefix constant marks the reserved label space for libnetwork + Prefix = "com.docker.network" + + // AllocSerialPrefix constant marks the reserved label space for libnetwork ipam + // allocation ordering.(serial/first available) + AllocSerialPrefix = Prefix + ".ipam.serial" +) diff --git a/libnetwork_internal_test.go b/libnetwork_internal_test.go index a5782711b8..c5b8ae2ae7 100644 --- a/libnetwork_internal_test.go +++ b/libnetwork_internal_test.go @@ -608,7 +608,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { } defer ep.Delete(false) - expectedIP, _ := types.ParseCIDR("10.34.0.2/16") + expectedIP, _ := types.ParseCIDR("10.34.0.1/16") if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { t.Fatalf("Ipam release must have failed, endpoint has unexpected address: %v", ep.Info().Iface().Address()) } From 878043960238db64a7e783199f688211560dd84c Mon Sep 17 00:00:00 2001 From: Flavio Crisciani Date: Thu, 16 Nov 2017 08:53:56 -0800 Subject: [PATCH 6/8] Fix failures for backport Signed-off-by: Flavio Crisciani --- drivers/windows/overlay/ov_network_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/windows/overlay/ov_network_windows.go b/drivers/windows/overlay/ov_network_windows.go index f89469f744..d94c84109b 100644 --- a/drivers/windows/overlay/ov_network_windows.go +++ b/drivers/windows/overlay/ov_network_windows.go @@ -444,7 +444,7 @@ func (n *network) obtainVxlanID(s *subnet) error { } if s.vni == 0 { - vxlanID, err := n.driver.vxlanIdm.GetID() + vxlanID, err := n.driver.vxlanIdm.GetID(false) if err != nil { return fmt.Errorf("failed to allocate vxlan id: %v", err) } From 0342c7b5f2a45cd4160ede8f162206e7350b93ff Mon Sep 17 00:00:00 2001 From: Santhosh Manohar Date: Tue, 10 Jan 2017 13:11:48 -0800 Subject: [PATCH 7/8] Godep update for 
docker plugingetter pkg Signed-off-by: Santhosh Manohar (cherry picked from commit 6f43b46432050dfe23e91cff65e30c61591ca427) --- Godeps/Godeps.json | 4 ++-- .../docker/docker/pkg/plugingetter/getter.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ee923c4ce2..b637c9c774 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -143,8 +143,8 @@ }, { "ImportPath": "github.com/docker/docker/pkg/plugingetter", - "Comment": "docs-v1.12.0-rc4-2016-07-15-4185-ge4512d2", - "Rev": "e4512d264741e83e954a19f9ef5e3cb06c5856b6" + "Comment": "docs-v1.12.0-rc4-2016-07-15-1985-g9c96768", + "Rev": "9c96768eae4b3a65147b47a55c850c103ab8972d" }, { "ImportPath": "github.com/docker/docker/pkg/plugins", diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/plugingetter/getter.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/plugingetter/getter.go index 6c2c82c670..b04b7bc828 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/plugingetter/getter.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/plugingetter/getter.go @@ -3,12 +3,12 @@ package plugingetter import "github.com/docker/docker/pkg/plugins" const ( - // LOOKUP doesn't update RefCount - LOOKUP = 0 - // ACQUIRE increments RefCount - ACQUIRE = 1 - // RELEASE decrements RefCount - RELEASE = -1 + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 ) // CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. 
From 22aa3355bc3ce2997b0da9080bb01921d3265bc4 Mon Sep 17 00:00:00 2001 From: Santhosh Manohar Date: Tue, 10 Jan 2017 13:17:15 -0800 Subject: [PATCH 8/8] carry docker/docker #29564 Signed-off-by: Santhosh Manohar (cherry picked from commit ec570ad8c6fc1fd7ba18326cc2c1611d2db8625e) --- controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controller.go b/controller.go index e3b6f77754..1a83a90018 100644 --- a/controller.go +++ b/controller.go @@ -1096,7 +1096,7 @@ func (c *controller) loadDriver(networkType string) error { var err error if pg := c.GetPluginGetter(); pg != nil { - _, err = pg.Get(networkType, driverapi.NetworkPluginEndpointType, plugingetter.LOOKUP) + _, err = pg.Get(networkType, driverapi.NetworkPluginEndpointType, plugingetter.Lookup) } else { _, err = plugins.Get(networkType, driverapi.NetworkPluginEndpointType) } @@ -1115,7 +1115,7 @@ func (c *controller) loadIPAMDriver(name string) error { var err error if pg := c.GetPluginGetter(); pg != nil { - _, err = pg.Get(name, ipamapi.PluginEndpointType, plugingetter.LOOKUP) + _, err = pg.Get(name, ipamapi.PluginEndpointType, plugingetter.Lookup) } else { _, err = plugins.Get(name, ipamapi.PluginEndpointType) }