From c45a00d2b8be3cb431bf0418ba365ce570393b10 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 13:33:51 -0700 Subject: [PATCH 1/3] Serializing bitseq alloc Previously the bitseq alloc was allocating the first available bit from the beginning of the sequence. With this commit the bitseq alloc proceeds from the current allocation. This change affects the way ipam and vni allocation is done: IP allocation is now done sequentially from the previous allocation, as opposed to taking the first available IP. Signed-off-by: Abhinandan Prativadi (cherry picked from commit 5790b5c17ce7a6e2733ba0f4dfccced8144237e3) Signed-off-by: Sebastiaan van Stijn --- bitseq/sequence.go | 31 ++++++++++++++++++++++++++++--- bitseq/store.go | 1 + libnetwork_internal_test.go | 2 +- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/bitseq/sequence.go b/bitseq/sequence.go index 3946473d8b..ade40633f0 100644 --- a/bitseq/sequence.go +++ b/bitseq/sequence.go @@ -41,6 +41,7 @@ type Handle struct { id string dbIndex uint64 dbExists bool + curr uint64 store datastore.DataStore sync.Mutex } @@ -193,6 +194,7 @@ func (h *Handle) getCopy() *Handle { dbIndex: h.dbIndex, dbExists: h.dbExists, store: h.store, + curr: h.curr, } } @@ -323,10 +325,10 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 bytePos, bitPos = ordinalToPos(ordinal) } else { if any { - bytePos, bitPos, err = getFirstAvailable(h.head, start) + bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, h.curr, end) ret = posToOrdinal(bytePos, bitPos) - if end < ret { - err = ErrNoBitAvailable + if err == nil { + h.curr = ret + 1 } } else { bytePos, bitPos, err = checkIfAvailable(h.head, ordinal) @@ -515,6 +517,29 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { return invalidPos, invalidPos, ErrNoBitAvailable } +//getAvailableFromCurrent will look for an available ordinal from the current ordinal. +// If none is found then it will loop back to the start to check for the available bit.
+//This can be further optimized to check from start till curr in case of a rollover +func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) { + var bytePos, bitPos uint64 + if curr != 0 && curr > start { + bytePos, bitPos, _ = getFirstAvailable(head, curr) + ret := posToOrdinal(bytePos, bitPos) + if end < ret { + goto begin + } + return bytePos, bitPos, nil + } + +begin: + bytePos, bitPos, _ = getFirstAvailable(head, start) + ret := posToOrdinal(bytePos, bitPos) + if end < ret { + return invalidPos, invalidPos, ErrNoBitAvailable + } + return bytePos, bitPos, nil +} + // checkIfAvailable checks if the bit correspondent to the specified ordinal is unset // If the ordinal is beyond the sequence limits, a negative response is returned func checkIfAvailable(head *sequence, ordinal uint64) (uint64, uint64, error) { diff --git a/bitseq/store.go b/bitseq/store.go index 5448927eb1..cdb7f04264 100644 --- a/bitseq/store.go +++ b/bitseq/store.go @@ -87,6 +87,7 @@ func (h *Handle) CopyTo(o datastore.KVObject) error { dstH.dbIndex = h.dbIndex dstH.dbExists = h.dbExists dstH.store = h.store + dstH.curr = h.curr dstH.Unlock() return nil diff --git a/libnetwork_internal_test.go b/libnetwork_internal_test.go index 58742cf5e1..805f3aaa8f 100644 --- a/libnetwork_internal_test.go +++ b/libnetwork_internal_test.go @@ -614,7 +614,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { } defer ep.Delete(false) - expectedIP, _ := types.ParseCIDR("10.34.0.1/16") + expectedIP, _ := types.ParseCIDR("10.34.0.2/16") if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { t.Fatalf("Ipam release must have failed, endpoint has unexpected address: %v", ep.Info().Iface().Address()) } From 19e576e1e35364464fcc28696595b319aa0205ad Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 17:41:21 -0700 Subject: [PATCH 2/3] Minor unit test change Since bit allocation is no longer first-available from the start, some verifications are removed/modified to match the changed allocation model Signed-off-by: Abhinandan Prativadi (cherry picked from commit 0184de8fc5074f94a74a0025ff60dab69cc442a9) Signed-off-by: Sebastiaan van Stijn --- bitseq/sequence_test.go | 202 ++++++++++++++++++++++++++++++++++++++-- ipam/allocator_test.go | 134 ++++++++++++++++++++++++++ 2 files changed, 326 insertions(+), 10 deletions(-) diff --git a/bitseq/sequence_test.go b/bitseq/sequence_test.go index 821b90ec35..83063f2635 100644 --- a/bitseq/sequence_test.go +++ b/bitseq/sequence_test.go @@ -562,7 +562,7 @@ func TestSet(t *testing.T) { t.Fatal("Expected failure, but succeeded") } - os, err := hnd.SetAny() + os, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure: %v", err) } @@ -606,11 +606,11 @@ func TestSetUnset(t *testing.T) { // set and unset all one by one for hnd.Unselected() > 0 { - if _, err := hnd.SetAny(); err != nil { + if _, err := hnd.SetAny(false); err != nil { t.Fatal(err) } } - if _, err := hnd.SetAny(); err != ErrNoBitAvailable { + if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { t.Fatal("Expected error. 
Got success") } if _, err := hnd.SetAnyInRange(10, 20); err != ErrNoBitAvailable { @@ -638,12 +638,12 @@ func TestOffsetSetUnset(t *testing.T) { // set and unset all one by one for hnd.Unselected() > 0 { - if _, err := hnd.SetAny(); err != nil { + if _, err := hnd.SetAny(false); err != nil { t.Fatal(err) } } - if _, err := hnd.SetAny(); err != ErrNoBitAvailable { + if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { t.Fatal("Expected error. Got success") } @@ -785,6 +785,126 @@ func TestSetInRange(t *testing.T) { } } +func TestSetInRangeSerial(t *testing.T) { + numBits := uint64(1024 * blockLen) + hnd, err := NewHandle("", nil, "", numBits) + if err != nil { + t.Fatal(err) + } + hnd.head = getTestSequence() + + firstAv := uint64(100*blockLen + blockLen - 1) + + if o, err := hnd.SetAnyInRange(4, 3); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if o, err := hnd.SetAnyInRange(0, numBits); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen)) + if err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + if o != firstAv { + t.Fatalf("Unexpected ordinal: %d", o) + } + + if o, err := hnd.SetAnyInRange(0, uint64(blockLen)); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if o, err := hnd.SetAnyInRange(0, firstAv-1); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen)); err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + if err != nil { + t.Fatal(err) + } + if o != 161*uint64(blockLen)+30 { + t.Fatalf("Unexpected ordinal: %d", o) + } + + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + if err != nil { + t.Fatal(err) + } + if o != 161*uint64(blockLen)+31 { + t.Fatalf("Unexpected ordinal: %d", o) + } + + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + if err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + + if _, err := hnd.SetAnyInRange(0, numBits-1); err != nil { + t.Fatalf("Unexpected failure: %v", err) + } + + // set one bit using the set range with 1 bit size range + if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1)); err != nil { + t.Fatal(err) + } + + // create a non multiple of 32 mask + hnd, err = NewHandle("", nil, "", 30) + if err != nil { + t.Fatal(err) + } + + // set all bit in the first range + for hnd.Unselected() > 22 { + if o, err := hnd.SetAnyInRange(0, 7); err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + } + // try one more set, which should fail + o, err = hnd.SetAnyInRange(0, 7) + if err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + if err != ErrNoBitAvailable { + t.Fatalf("Unexpected error: %v", err) + } + + // set all bit in a second range + for hnd.Unselected() > 14 { + if o, err := hnd.SetAnyInRange(8, 15); err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + } + + // try one more set, which should fail + o, err = hnd.SetAnyInRange(0, 15) + if err == nil { + t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) + } + if err != ErrNoBitAvailable { + t.Fatalf("Unexpected error: %v", err) + } + + // set all bit in a range which includes the last bit + for hnd.Unselected() > 12 { + if o, err := hnd.SetAnyInRange(28, 29); err != nil { + t.Fatalf("Unexpected failure: (%d, %v)", o, err) + } + } + o, err = hnd.SetAnyInRange(28, 29) + if err == nil { + t.Fatalf("Expected failure. Got success with ordinal:%d", o) + } + if err != ErrNoBitAvailable { + t.Fatalf("Unexpected error: %v", err) + } +} + // This one tests an allocation pattern which unveiled an issue in pushReservation // Specifically a failure in detecting when we are in the (B) case (the bit to set // belongs to the last block of the current sequence). Because of a bug, code @@ -861,7 +981,7 @@ func TestMethods(t *testing.T) { } for i := 0; i < 192; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatal(err) } @@ -941,7 +1061,7 @@ func TestAllocateRandomDeallocate(t *testing.T) { // Allocate first half of the bits for i := 0; i < numBits/2; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) } @@ -971,7 +1091,7 @@ func TestAllocateRandomDeallocate(t *testing.T) { // Request a quarter of bits for i := 0; i < numBits/4; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) } @@ -989,6 +1109,68 @@ func TestAllocateRandomDeallocate(t *testing.T) { } } +func TestAllocateRandomDeallocateSerialize(t *testing.T) { + ds, err := randomLocalStore() + if err != nil { + t.Fatal(err) + } + + numBlocks := uint32(8) + numBits := int(numBlocks * blockLen) + hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits)) + if err != nil { + t.Fatal(err) + } + + expected := &sequence{block: 0xffffffff, count: uint64(numBlocks / 2), next: &sequence{block: 0x0, count: uint64(numBlocks / 2)}} + + // Allocate first half of the bits + for i := 0; i < numBits/2; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) + } + } + if hnd.Unselected() != uint64(numBits/2) { + t.Fatalf("Expected full sequence. Instead found %d free bits. %s", hnd.unselected, hnd) + } + if !hnd.head.equal(expected) { + t.Fatalf("Unexpected sequence. Got:\n%s", hnd) + } + + seed := time.Now().Unix() + rand.Seed(seed) + + // Deallocate half of the allocated bits following a random pattern + pattern := rand.Perm(numBits / 2) + for i := 0; i < numBits/4; i++ { + bit := pattern[i] + err := hnd.Unset(uint64(bit)) + if err != nil { + t.Fatalf("Unexpected failure on deallocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd) + } + } + if hnd.Unselected() != uint64(3*numBits/4) { + t.Fatalf("Expected full sequence. Instead found %d free bits.\nSeed: %d.\n%s", hnd.unselected, seed, hnd) + } + + // Request a quarter of bits + for i := 0; i < numBits/4; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) + } + } + if hnd.Unselected() != uint64(numBits/2) { + t.Fatalf("Expected half sequence. 
Instead found %d free bits.\nSeed: %d\n%s", hnd.unselected, seed, hnd) + } + + err = hnd.Destroy() + if err != nil { + t.Fatal(err) + } +} + func TestRetrieveFromStore(t *testing.T) { ds, err := randomLocalStore() if err != nil { @@ -1003,7 +1185,7 @@ func TestRetrieveFromStore(t *testing.T) { // Allocate first half of the bits for i := 0; i < numBits/2; i++ { - _, err := hnd.SetAny() + _, err := hnd.SetAny(false) if err != nil { t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) } diff --git a/ipam/allocator_test.go b/ipam/allocator_test.go index 2ccf88084a..4454860e11 100644 --- a/ipam/allocator_test.go +++ b/ipam/allocator_test.go @@ -644,6 +644,7 @@ func TestRequestReleaseAddressFromSubPool(t *testing.T) { unoExp, _ := types.ParseCIDR("10.2.2.0/16") dueExp, _ := types.ParseCIDR("10.2.2.2/16") treExp, _ := types.ParseCIDR("10.2.2.1/16") + if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil { t.Fatal(err) } @@ -694,6 +695,139 @@ func TestRequestReleaseAddressFromSubPool(t *testing.T) { } } +func TestSerializeRequestReleaseAddressFromSubPool(t *testing.T) { + opts := map[string]string{ + ipamapi.AllocSerialPrefix: "true"} + a, err := getAllocator() + if err != nil { + t.Fatal(err) + } + a.addrSpaces["rosso"] = &addrSpace{ + id: dsConfigKey + "/" + "rosso", + ds: a.addrSpaces[localAddressSpace].ds, + alloc: a.addrSpaces[localAddressSpace].alloc, + scope: a.addrSpaces[localAddressSpace].scope, + subnets: map[SubnetKey]*PoolData{}, + } + + poolID, _, _, err := a.RequestPool("rosso", "172.28.0.0/16", "172.28.30.0/24", nil, false) + if err != nil { + t.Fatal(err) + } + + var ip *net.IPNet + expected := &net.IPNet{IP: net.IP{172, 28, 30, 255}, Mask: net.IPMask{255, 255, 0, 0}} + for err == nil { + var c *net.IPNet + if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil { + ip = c + } + } + if err != ipamapi.ErrNoAvailableIPs { + t.Fatal(err) + } + if !types.CompareIPNet(expected, ip) { + t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip) + } + rp := &net.IPNet{IP: net.IP{172, 28, 30, 97}, Mask: net.IPMask{255, 255, 0, 0}} + if err = a.ReleaseAddress(poolID, rp.IP); err != nil { + t.Fatal(err) + } + if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(rp, ip) { + t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip) + } + + _, _, _, err = a.RequestPool("rosso", "10.0.0.0/8", "10.0.0.0/16", nil, false) + if err != nil { + t.Fatal(err) + } + poolID, _, _, err = a.RequestPool("rosso", "10.0.0.0/16", "10.0.0.0/24", nil, false) + if err != nil { + t.Fatal(err) + } + expected = &net.IPNet{IP: net.IP{10, 0, 0, 255}, Mask: net.IPMask{255, 255, 0, 0}} + for err == nil { + var c *net.IPNet + if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil { + ip = c + } + } + if err != ipamapi.ErrNoAvailableIPs { + t.Fatal(err) + } + if !types.CompareIPNet(expected, ip) { + t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip) + } + rp = &net.IPNet{IP: net.IP{10, 0, 0, 79}, Mask: net.IPMask{255, 255, 0, 0}} + if err = a.ReleaseAddress(poolID, rp.IP); err != nil { + t.Fatal(err) + } + if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(rp, ip) { + t.Fatalf("Unexpected IP from subpool. Expected: %s. 
Got: %v.", rp, ip) + } + + // Request any addresses from subpool after explicit address request + unoExp, _ := types.ParseCIDR("10.2.2.0/16") + dueExp, _ := types.ParseCIDR("10.2.2.2/16") + treExp, _ := types.ParseCIDR("10.2.2.1/16") + quaExp, _ := types.ParseCIDR("10.2.2.3/16") + fivExp, _ := types.ParseCIDR("10.2.2.4/16") + if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil { + t.Fatal(err) + } + tre, _, err := a.RequestAddress(poolID, treExp.IP, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(tre, treExp) { + t.Fatalf("Unexpected address: %v", tre) + } + + uno, _, err := a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(uno, unoExp) { + t.Fatalf("Unexpected address: %v", uno) + } + + due, _, err := a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(due, dueExp) { + t.Fatalf("Unexpected address: %v", due) + } + + if err = a.ReleaseAddress(poolID, uno.IP); err != nil { + t.Fatal(err) + } + uno, _, err = a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(uno, quaExp) { + t.Fatalf("Unexpected address: %v", uno) + } + + if err = a.ReleaseAddress(poolID, tre.IP); err != nil { + t.Fatal(err) + } + tre, _, err = a.RequestAddress(poolID, nil, opts) + if err != nil { + t.Fatal(err) + } + if !types.CompareIPNet(tre, fivExp) { + t.Fatalf("Unexpected address: %v", tre) + } +} + func TestGetAddress(t *testing.T) { input := []string{ /*"10.0.0.0/8", "10.0.0.0/9", "10.0.0.0/10",*/ "10.0.0.0/11", "10.0.0.0/12", "10.0.0.0/13", "10.0.0.0/14", From b96f5e476d53f86c26399bbf22970eb51a8797d0 Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 19:41:21 -0700 Subject: [PATCH 3/3] Adding a unit case to verify rollover Signed-off-by: Abhinandan Prativadi (cherry picked from commit a2bcac00122a43d771af77ff060794dd02bb4a00) Signed-off-by: Sebastiaan van Stijn --- bitseq/sequence.go | 24 +- bitseq/sequence_test.go | 244 +++++++----------- drivers/overlay/ov_network.go | 2 +- drivers/overlay/ovmanager/ovmanager.go | 2 +- drivers/solaris/overlay/ov_network.go | 2 +- .../solaris/overlay/ovmanager/ovmanager.go | 2 +- idm/idm.go | 8 +- idm/idm_test.go | 92 +++++-- ipam/allocator.go | 16 +- ipam/allocator_test.go | 2 +- ipamapi/labels.go | 10 + libnetwork_internal_test.go | 2 +- 12 files changed, 221 insertions(+), 185 deletions(-) create mode 100644 ipamapi/labels.go diff --git a/bitseq/sequence.go b/bitseq/sequence.go index ade40633f0..a1a9810dc5 100644 --- a/bitseq/sequence.go +++ b/bitseq/sequence.go @@ -199,22 +199,22 @@ func (h *Handle) getCopy() *Handle { } // SetAnyInRange atomically sets the first unset bit in the specified range in the sequence and returns the corresponding ordinal -func (h *Handle) SetAnyInRange(start, end uint64) (uint64, error) { +func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) { if end < start || end >= h.bits { return invalidPos, fmt.Errorf("invalid bit range [%d, %d]", start, end) } if h.Unselected() == 0 { return invalidPos, ErrNoBitAvailable } - return h.set(0, start, end, true, false) + return h.set(0, start, end, true, false, serial) } // SetAny atomically sets the first unset bit in the sequence and returns the corresponding ordinal -func (h *Handle) SetAny() (uint64, error) { +func (h *Handle) SetAny(serial bool) (uint64, error) { if h.Unselected() == 0 { return invalidPos, ErrNoBitAvailable } - return h.set(0, 0, h.bits-1, 
true, false) + return h.set(0, 0, h.bits-1, true, false, serial) } // Set atomically sets the corresponding bit in the sequence @@ -222,7 +222,7 @@ func (h *Handle) Set(ordinal uint64) error { if err := h.validateOrdinal(ordinal); err != nil { return err } - _, err := h.set(ordinal, 0, 0, false, false) + _, err := h.set(ordinal, 0, 0, false, false, false) return err } @@ -231,7 +231,7 @@ func (h *Handle) Unset(ordinal uint64) error { if err := h.validateOrdinal(ordinal); err != nil { return err } - _, err := h.set(ordinal, 0, 0, false, true) + _, err := h.set(ordinal, 0, 0, false, true, false) return err } @@ -300,7 +300,7 @@ func (h *Handle) CheckConsistency() error { } // set/reset the bit -func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64, error) { +func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) { var ( bitPos uint64 bytePos uint64 @@ -310,6 +310,7 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 for { var store datastore.DataStore + curr := uint64(0) h.Lock() store = h.store h.Unlock() @@ -320,12 +321,15 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 } h.Lock() + if serial { + curr = h.curr + } // Get position if available if release { bytePos, bitPos = ordinalToPos(ordinal) } else { if any { - bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, h.curr, end) + bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end) ret = posToOrdinal(bytePos, bitPos) if err == nil { h.curr = ret + 1 @@ -517,9 +521,9 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { return invalidPos, invalidPos, ErrNoBitAvailable } -//getAvailableFromCurrent will look for an available ordinal from the current ordinal. +// getAvailableFromCurrent will look for an available ordinal from the current ordinal. // If none is found then it will loop back to the start to check for the available bit. -//This can be further optimized to check from start till curr in case of a rollover +// This can be further optimized to check from start till curr in case of a rollover func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) { var bytePos, bitPos uint64 if curr != 0 && curr > start { diff --git a/bitseq/sequence_test.go b/bitseq/sequence_test.go index 83063f2635..dc63706b39 100644 --- a/bitseq/sequence_test.go +++ b/bitseq/sequence_test.go @@ -613,7 +613,7 @@ func TestSetUnset(t *testing.T) { if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { t.Fatal("Expected error. Got success") } - if _, err := hnd.SetAnyInRange(10, 20); err != ErrNoBitAvailable { + if _, err := hnd.SetAnyInRange(10, 20, false); err != ErrNoBitAvailable { t.Fatal("Expected error. Got success") } if err := hnd.Set(50); err != ErrBitAllocated { @@ -638,12 +638,12 @@ func TestOffsetSetUnset(t *testing.T) { // set and unset all one by one for hnd.Unselected() > 0 { - if _, err := hnd.SetAny(); err != nil { + if _, err := hnd.SetAny(false); err != nil { t.Fatal(err) } } - if _, err := hnd.SetAny(); err != ErrNoBitAvailable { + if _, err := hnd.SetAny(false); err != ErrNoBitAvailable { t.Fatal("Expected error. 
Got success") } @@ -656,7 +656,7 @@ func TestOffsetSetUnset(t *testing.T) { } //At this point sequence is (0xffffffff, 9)->(0x7fffffff, 1)->(0xffffffff, 22)->end - if o, err = hnd.SetAnyInRange(32, 500); err != nil { + if o, err = hnd.SetAnyInRange(32, 500, false); err != nil { t.Fatal(err) } @@ -675,15 +675,15 @@ func TestSetInRange(t *testing.T) { firstAv := uint64(100*blockLen + blockLen - 1) - if o, err := hnd.SetAnyInRange(4, 3); err == nil { + if o, err := hnd.SetAnyInRange(4, 3, false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - if o, err := hnd.SetAnyInRange(0, numBits); err == nil { + if o, err := hnd.SetAnyInRange(0, numBits, false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen)) + o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen), false) if err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } @@ -691,19 +691,19 @@ func TestSetInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - if o, err := hnd.SetAnyInRange(0, uint64(blockLen)); err == nil { + if o, err := hnd.SetAnyInRange(0, uint64(blockLen), false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - if o, err := hnd.SetAnyInRange(0, firstAv-1); err == nil { + if o, err := hnd.SetAnyInRange(0, firstAv-1, false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen)); err == nil { + if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen), false); err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen), false) if err != nil { t.Fatal(err) } @@ -711,7 +711,7 @@ func TestSetInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen), false) if err != nil { t.Fatal(err) } @@ -719,17 +719,17 @@ func TestSetInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) + o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen), false) if err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } - if _, err := hnd.SetAnyInRange(0, numBits-1); err != nil { + if _, err := hnd.SetAnyInRange(0, numBits-1, false); err != nil { t.Fatalf("Unexpected failure: %v", err) } // set one bit using the set range with 1 bit size range - if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1)); err != nil { + if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1), false); err != nil { t.Fatal(err) } @@ -741,12 +741,12 @@ func TestSetInRange(t *testing.T) { // set all bit in the first range for hnd.Unselected() > 22 { - if o, err := hnd.SetAnyInRange(0, 7); err != nil { + if o, err := hnd.SetAnyInRange(0, 7, false); err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } } // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 7) + o, err = hnd.SetAnyInRange(0, 7, false) if err == nil { t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) } @@ -756,13 +756,13 @@ func TestSetInRange(t *testing.T) { // set all bit in a second range for hnd.Unselected() > 14 { - if o, err := hnd.SetAnyInRange(8, 15); err != nil { + if o, err := hnd.SetAnyInRange(8, 15, false); err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } } // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 15) + o, err = hnd.SetAnyInRange(0, 15, false) if err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } @@ -772,131 +772,11 @@ func TestSetInRange(t *testing.T) { // set all bit in a range which includes the last bit for hnd.Unselected() > 12 { - if o, err := hnd.SetAnyInRange(28, 29); err != nil { + if o, err := hnd.SetAnyInRange(28, 29, false); err != nil { t.Fatalf("Unexpected failure: (%d, %v)", o, err) } } - o, err = hnd.SetAnyInRange(28, 29) - if err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - if err != ErrNoBitAvailable { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestSetInRangeSerial(t *testing.T) { - numBits := uint64(1024 * blockLen) - hnd, err := NewHandle("", nil, "", numBits) - if err != nil { - t.Fatal(err) - } - hnd.head = getTestSequence() - - firstAv := uint64(100*blockLen + blockLen - 1) - - if o, err := hnd.SetAnyInRange(4, 3); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if o, err := hnd.SetAnyInRange(0, numBits); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - o, err := hnd.SetAnyInRange(100*uint64(blockLen), 101*uint64(blockLen)) - if err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - if o != firstAv { - t.Fatalf("Unexpected ordinal: %d", o) - } - - if o, err := hnd.SetAnyInRange(0, uint64(blockLen)); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if o, err := hnd.SetAnyInRange(0, firstAv-1); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if o, err := hnd.SetAnyInRange(111*uint64(blockLen), 161*uint64(blockLen)); err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) - if err != nil { - t.Fatal(err) - } - if o != 161*uint64(blockLen)+30 { - t.Fatalf("Unexpected ordinal: %d", o) - } - - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) - if err != nil { - t.Fatal(err) - } - if o != 161*uint64(blockLen)+31 { - t.Fatalf("Unexpected ordinal: %d", o) - } - - o, err = hnd.SetAnyInRange(161*uint64(blockLen), 162*uint64(blockLen)) - if err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - - if _, err := hnd.SetAnyInRange(0, numBits-1); err != nil { - t.Fatalf("Unexpected failure: %v", err) - } - - // set one bit using the set range with 1 bit size range - if _, err := hnd.SetAnyInRange(uint64(163*blockLen-1), uint64(163*blockLen-1)); err != nil { - t.Fatal(err) - } - - // create a non multiple of 32 mask - hnd, err = NewHandle("", nil, "", 30) - if err != nil { - t.Fatal(err) - } - - // set all bit in the first range - for hnd.Unselected() > 22 { - if o, err := hnd.SetAnyInRange(0, 7); err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - } - // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 7) - if err == nil { - t.Fatalf("Expected failure. 
Got success with ordinal:%d", o) - } - if err != ErrNoBitAvailable { - t.Fatalf("Unexpected error: %v", err) - } - - // set all bit in a second range - for hnd.Unselected() > 14 { - if o, err := hnd.SetAnyInRange(8, 15); err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - } - - // try one more set, which should fail - o, err = hnd.SetAnyInRange(0, 15) - if err == nil { - t.Fatalf("Expected failure. Got success with ordinal:%d", o) - } - if err != ErrNoBitAvailable { - t.Fatalf("Unexpected error: %v", err) - } - - // set all bit in a range which includes the last bit - for hnd.Unselected() > 12 { - if o, err := hnd.SetAnyInRange(28, 29); err != nil { - t.Fatalf("Unexpected failure: (%d, %v)", o, err) - } - } - o, err = hnd.SetAnyInRange(28, 29) + o, err = hnd.SetAnyInRange(28, 29, false) if err == nil { t.Fatalf("Expected failure. Got success with ordinal:%d", o) } @@ -926,7 +806,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatal(err) } - o, err := hnd.SetAnyInRange(128, 255) + o, err := hnd.SetAnyInRange(128, 255, false) if err != nil { t.Fatal(err) } @@ -934,7 +814,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(128, 255) + o, err = hnd.SetAnyInRange(128, 255, false) if err != nil { t.Fatal(err) } @@ -943,7 +823,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(246, 255) + o, err = hnd.SetAnyInRange(246, 255, false) if err != nil { t.Fatal(err) } @@ -951,7 +831,7 @@ func TestSetAnyInRange(t *testing.T) { t.Fatalf("Unexpected ordinal: %d", o) } - o, err = hnd.SetAnyInRange(246, 255) + o, err = hnd.SetAnyInRange(246, 255, false) if err != nil { t.Fatal(err) } @@ -1131,6 +1011,7 @@ func TestAllocateRandomDeallocateSerialize(t *testing.T) { t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) } } + if hnd.Unselected() != uint64(numBits/2) { t.Fatalf("Expected full sequence. Instead found %d free bits. %s", hnd.unselected, hnd) } @@ -1356,3 +1237,76 @@ func TestIsCorrupted(t *testing.T) { } } } + +func TestSetRollover(t *testing.T) { + ds, err := randomLocalStore() + if err != nil { + t.Fatal(err) + } + + numBlocks := uint32(8) + numBits := int(numBlocks * blockLen) + hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits)) + if err != nil { + t.Fatal(err) + } + + // Allocate first half of the bits + for i := 0; i < numBits/2; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd) + } + } + + if hnd.Unselected() != uint64(numBits/2) { + t.Fatalf("Expected full sequence. Instead found %d free bits. %s", hnd.unselected, hnd) + } + + seed := time.Now().Unix() + rand.Seed(seed) + + // Deallocate half of the allocated bits following a random pattern + pattern := rand.Perm(numBits / 2) + for i := 0; i < numBits/4; i++ { + bit := pattern[i] + err := hnd.Unset(uint64(bit)) + if err != nil { + t.Fatalf("Unexpected failure on deallocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd) + } + } + if hnd.Unselected() != uint64(3*numBits/4) { + t.Fatalf("Expected full sequence. 
Instead found %d free bits.\nSeed: %d.\n%s", hnd.unselected, seed, hnd) } + + //request to allocate the remaining half of the bits + for i := 0; i < numBits/2; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) + } + } + + //At this point all the bits must be allocated except the randomly unallocated bits + //which were unallocated in the first half of the bit sequence + if hnd.Unselected() != uint64(numBits/4) { + t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), numBits/4) + } + + for i := 0; i < numBits/4; i++ { + _, err := hnd.SetAny(true) + if err != nil { + t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd) + } + } + //Now requesting to allocate the unallocated random bits (a quarter of the number of bits) should + //leave no more bits that can be allocated. + if hnd.Unselected() != 0 { + t.Fatalf("Unexpected number of unselected bits %d, Expected 0", hnd.Unselected()) + } + + err = hnd.Destroy() + if err != nil { + t.Fatal(err) + } +} diff --git a/drivers/overlay/ov_network.go b/drivers/overlay/ov_network.go index 126093fefe..4fbb976643 100644 --- a/drivers/overlay/ov_network.go +++ b/drivers/overlay/ov_network.go @@ -1058,7 +1058,7 @@ func (n *network) obtainVxlanID(s *subnet) error { } if s.vni == 0 { - vxlanID, err := n.driver.vxlanIdm.GetID() + vxlanID, err := n.driver.vxlanIdm.GetID(true) if err != nil { return fmt.Errorf("failed to allocate vxlan id: %v", err) } diff --git a/drivers/overlay/ovmanager/ovmanager.go b/drivers/overlay/ovmanager/ovmanager.go index a80f335892..58cc687d4f 100644 --- a/drivers/overlay/ovmanager/ovmanager.go +++ b/drivers/overlay/ovmanager/ovmanager.go @@ -165,7 +165,7 @@ func (n *network) obtainVxlanID(s *subnet) error { n.Unlock() if vni == 0 { - vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd) + vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd, true) if err != nil { return err } diff --git a/drivers/solaris/overlay/ov_network.go b/drivers/solaris/overlay/ov_network.go index b545bc8903..e490c18d74 100644 --- a/drivers/solaris/overlay/ov_network.go +++ b/drivers/solaris/overlay/ov_network.go @@ -718,7 +718,7 @@ func (n *network) obtainVxlanID(s *subnet) error { } if s.vni == 0 { - vxlanID, err := n.driver.vxlanIdm.GetID() + vxlanID, err := n.driver.vxlanIdm.GetID(true) if err != nil { return fmt.Errorf("failed to allocate vxlan id: %v", err) } diff --git a/drivers/solaris/overlay/ovmanager/ovmanager.go b/drivers/solaris/overlay/ovmanager/ovmanager.go index c39f53aefa..430e38a4a3 100644 --- a/drivers/solaris/overlay/ovmanager/ovmanager.go +++ b/drivers/solaris/overlay/ovmanager/ovmanager.go @@ -165,7 +165,7 @@ func (n *network) obtainVxlanID(s *subnet) error { n.Unlock() if vni == 0 { - vni, err = n.driver.vxlanIdm.GetID() + vni, err = n.driver.vxlanIdm.GetID(true) if err != nil { return err } diff --git a/idm/idm.go b/idm/idm.go index 7e449a0dc8..d5843d4a58 100644 --- a/idm/idm.go +++ b/idm/idm.go @@ -34,11 +34,11 @@ func New(ds datastore.DataStore, id string, start, end uint64) (*Idm, error) { } // GetID returns the first available id in the set -func (i *Idm) GetID() (uint64, error) { +func (i *Idm) GetID(serial bool) (uint64, error) { if i.handle == nil { return 0, errors.New("ID set is not initialized") } - ordinal, err := i.handle.SetAny() + ordinal, err := i.handle.SetAny(serial) return i.start + ordinal, err } @@ -56,7 +56,7 @@ func (i *Idm) 
GetSpecificID(id uint64) error { } // GetIDInRange returns the first available id in the set within a [start,end] range -func (i *Idm) GetIDInRange(start, end uint64) (uint64, error) { +func (i *Idm) GetIDInRange(start, end uint64, serial bool) (uint64, error) { if i.handle == nil { return 0, errors.New("ID set is not initialized") } @@ -65,7 +65,7 @@ func (i *Idm) GetIDInRange(start, end uint64) (uint64, error) { return 0, errors.New("Requested range does not belong to the set") } - ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start) + ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start, serial) return i.start + ordinal, err } diff --git a/idm/idm_test.go b/idm/idm_test.go index 0126696974..74bbbc20f9 100644 --- a/idm/idm_test.go +++ b/idm/idm_test.go @@ -46,7 +46,7 @@ func TestAllocate(t *testing.T) { t.Fatal("Expected failure but succeeded") } - o, err := i.GetID() + o, err := i.GetID(false) if err != nil { t.Fatal(err) } @@ -59,7 +59,7 @@ func TestAllocate(t *testing.T) { t.Fatal(err) } - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -67,7 +67,7 @@ func TestAllocate(t *testing.T) { t.Fatalf("Unexpected id returned: %d", o) } - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -75,14 +75,14 @@ func TestAllocate(t *testing.T) { t.Fatalf("Unexpected id returned: %d", o) } - o, err = i.GetID() + o, err = i.GetID(false) if err == nil { t.Fatalf("Expected failure but succeeded: %d", o) } i.Release(50) - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -100,7 +100,7 @@ func TestAllocate(t *testing.T) { func TestUninitialized(t *testing.T) { i := &Idm{} - if _, err := i.GetID(); err == nil { + if _, err := i.GetID(false); err == nil { t.Fatal("Expected failure but succeeded") } @@ -115,7 +115,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatal(err) } - o, err := i.GetIDInRange(6, 6) + o, err := i.GetIDInRange(6, 6, false) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Expected failure but succeeded") } - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -137,7 +137,7 @@ func TestAllocateInRange(t *testing.T) { i.Release(6) - o, err = i.GetID() + o, err = i.GetID(false) if err != nil { t.Fatal(err) } @@ -146,7 +146,7 @@ func TestAllocateInRange(t *testing.T) { } for n := 7; n <= 10; n++ { - o, err := i.GetIDInRange(7, 10) + o, err := i.GetIDInRange(7, 10, false) if err != nil { t.Fatal(err) } @@ -165,7 +165,7 @@ func TestAllocateInRange(t *testing.T) { i.Release(10) - o, err = i.GetIDInRange(5, 10) + o, err = i.GetIDInRange(5, 10, false) if err != nil { t.Fatal(err) } @@ -175,7 +175,7 @@ func TestAllocateInRange(t *testing.T) { i.Release(5) - o, err = i.GetIDInRange(5, 10) + o, err = i.GetIDInRange(5, 10, false) if err != nil { t.Fatal(err) } @@ -188,7 +188,7 @@ func TestAllocateInRange(t *testing.T) { } for n := 5; n <= 10; n++ { - o, err := i.GetIDInRange(5, 10) + o, err := i.GetIDInRange(5, 10, false) if err != nil { t.Fatal(err) } @@ -210,7 +210,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatal(err) } - o, err = i.GetIDInRange(4096, ul) + o, err = i.GetIDInRange(4096, ul, false) if err != nil { t.Fatal(err) } @@ -218,7 +218,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Unexpected id returned. Expected: 4096. 
Got: %d", o) } - o, err = i.GetIDInRange(4096, ul) + o, err = i.GetIDInRange(4096, ul, false) if err != nil { t.Fatal(err) } @@ -226,7 +226,7 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Unexpected id returned. Expected: 4097. Got: %d", o) } - o, err = i.GetIDInRange(4096, ul) + o, err = i.GetIDInRange(4096, ul, false) if err != nil { t.Fatal(err) } @@ -234,3 +234,63 @@ func TestAllocateInRange(t *testing.T) { t.Fatalf("Unexpected id returned. Expected: 4098. Got: %d", o) } } + +func TestAllocateSerial(t *testing.T) { + i, err := New(nil, "myids", 50, 55) + if err != nil { + t.Fatal(err) + } + + if err = i.GetSpecificID(49); err == nil { + t.Fatal("Expected failure but succeeded") + } + + if err = i.GetSpecificID(56); err == nil { + t.Fatal("Expected failure but succeeded") + } + + o, err := i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 50 { + t.Fatalf("Unexpected first id returned: %d", o) + } + + err = i.GetSpecificID(50) + if err == nil { + t.Fatal(err) + } + + o, err = i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 51 { + t.Fatalf("Unexpected id returned: %d", o) + } + + o, err = i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 52 { + t.Fatalf("Unexpected id returned: %d", o) + } + + i.Release(50) + + o, err = i.GetID(true) + if err != nil { + t.Fatal(err) + } + if o != 53 { + t.Fatal("Unexpected id returned") + } + + i.Release(52) + err = i.GetSpecificID(52) + if err != nil { + t.Fatal(err) + } +} diff --git a/ipam/allocator.go b/ipam/allocator.go index 71c9f39531..c9769220be 100644 --- a/ipam/allocator.go +++ b/ipam/allocator.go @@ -457,7 +457,15 @@ func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s return nil, nil, types.InternalErrorf("could not find bitmask in datastore for %s on address %v request from pool %s: %v", k.String(), prefAddress, poolID, err) } - ip, err := a.getAddress(p.Pool, bm, prefAddress, p.Range) + // In order to request for a serial ip address allocation, callers can pass in the option to request + // IP allocation serially or first available IP in the subnet + var serial bool + if opts != nil { + if val, ok := opts[ipamapi.AllocSerialPrefix]; ok { + serial = (val == "true") + } + } + ip, err := a.getAddress(p.Pool, bm, prefAddress, p.Range, serial) if err != nil { return nil, nil, err } @@ -522,7 +530,7 @@ func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error { return bm.Unset(ipToUint64(h)) } -func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange) (net.IP, error) { +func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange, serial bool) (net.IP, error) { var ( ordinal uint64 err error @@ -535,7 +543,7 @@ func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddres return nil, ipamapi.ErrNoAvailableIPs } if ipr == nil && prefAddress == nil { - ordinal, err = bitmask.SetAny() + ordinal, err = bitmask.SetAny(serial) } else if prefAddress != nil { hostPart, e := types.GetHostPartIP(prefAddress, base.Mask) if e != nil { @@ -544,7 +552,7 @@ func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddres ordinal = ipToUint64(types.GetMinimalIP(hostPart)) err = bitmask.Set(ordinal) } else { - ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End) + ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End, serial) } switch err { diff --git a/ipam/allocator_test.go b/ipam/allocator_test.go index 4454860e11..7108eaa644 100644 --- 
a/ipam/allocator_test.go +++ b/ipam/allocator_test.go @@ -1033,7 +1033,7 @@ func assertGetAddress(t *testing.T, subnet string) { start := time.Now() run := 0 for err != ipamapi.ErrNoAvailableIPs { - _, err = a.getAddress(sub, bm, nil, nil) + _, err = a.getAddress(sub, bm, nil, nil, false) run++ } if printTime { diff --git a/ipamapi/labels.go b/ipamapi/labels.go new file mode 100644 index 0000000000..e5c7d1cc7e --- /dev/null +++ b/ipamapi/labels.go @@ -0,0 +1,10 @@ +package ipamapi + +const ( + // Prefix constant marks the reserved label space for libnetwork + Prefix = "com.docker.network" + + // AllocSerialPrefix constant marks the reserved label space for libnetwork ipam + // allocation ordering (serial/first available) + AllocSerialPrefix = Prefix + ".ipam.serial" +) diff --git a/libnetwork_internal_test.go b/libnetwork_internal_test.go index 805f3aaa8f..58742cf5e1 100644 --- a/libnetwork_internal_test.go +++ b/libnetwork_internal_test.go @@ -614,7 +614,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { } defer ep.Delete(false) - expectedIP, _ := types.ParseCIDR("10.34.0.2/16") + expectedIP, _ := types.ParseCIDR("10.34.0.1/16") if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { t.Fatalf("Ipam release must have failed, endpoint has unexpected address: %v", ep.Info().Iface().Address()) }
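
Usage sketch (illustrative, not part of the patch series above): how a caller opts in to the serial allocation behaviour these patches introduce, via the ipamapi.AllocSerialPrefix option consumed by Allocator.RequestAddress. The helper below is hypothetical; the allocator and poolID are assumed to come from ipam.NewAllocator and a.RequestPool, as exercised in the tests.

package main

import (
	"net"

	"github.com/docker/libnetwork/ipam"
	"github.com/docker/libnetwork/ipamapi"
)

// requestSerial requests the next address from poolID with the serial
// allocation option set. With the option enabled, allocation continues from
// the previously allocated ordinal (bitseq SetAny(true)) instead of reusing
// the first available bit, so a released address is not handed out again
// until the range rolls over.
func requestSerial(a *ipam.Allocator, poolID string) (*net.IPNet, error) {
	opts := map[string]string{ipamapi.AllocSerialPrefix: "true"}
	ip, _, err := a.RequestAddress(poolID, nil, opts)
	return ip, err
}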